@fugood/llama.node 0.3.2 → 0.3.4
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/CMakeLists.txt +7 -0
- package/bin/darwin/arm64/llama-node.node +0 -0
- package/bin/darwin/x64/llama-node.node +0 -0
- package/bin/linux/arm64/llama-node.node +0 -0
- package/bin/linux/x64/llama-node.node +0 -0
- package/bin/linux-vulkan/arm64/llama-node.node +0 -0
- package/bin/linux-vulkan/x64/llama-node.node +0 -0
- package/bin/win32/arm64/llama-node.node +0 -0
- package/bin/win32/arm64/node.lib +0 -0
- package/bin/win32/x64/llama-node.node +0 -0
- package/bin/win32/x64/node.lib +0 -0
- package/bin/win32-vulkan/arm64/llama-node.node +0 -0
- package/bin/win32-vulkan/arm64/node.lib +0 -0
- package/bin/win32-vulkan/x64/llama-node.node +0 -0
- package/bin/win32-vulkan/x64/node.lib +0 -0
- package/lib/binding.ts +18 -1
- package/package.json +1 -1
- package/src/DetokenizeWorker.cpp +1 -1
- package/src/EmbeddingWorker.cpp +17 -7
- package/src/EmbeddingWorker.h +2 -1
- package/src/LlamaCompletionWorker.cpp +8 -8
- package/src/LlamaCompletionWorker.h +2 -2
- package/src/LlamaContext.cpp +89 -27
- package/src/LlamaContext.h +2 -0
- package/src/TokenizeWorker.cpp +1 -1
- package/src/common.hpp +4 -4
- package/src/llama.cpp/.github/workflows/build.yml +240 -168
- package/src/llama.cpp/.github/workflows/docker.yml +8 -8
- package/src/llama.cpp/.github/workflows/python-lint.yml +8 -1
- package/src/llama.cpp/.github/workflows/server.yml +21 -14
- package/src/llama.cpp/CMakeLists.txt +14 -6
- package/src/llama.cpp/Sources/llama/llama.h +4 -0
- package/src/llama.cpp/cmake/arm64-apple-clang.cmake +16 -0
- package/src/llama.cpp/cmake/common.cmake +33 -0
- package/src/llama.cpp/cmake/x64-windows-llvm.cmake +11 -0
- package/src/llama.cpp/common/CMakeLists.txt +6 -4
- package/src/llama.cpp/common/arg.cpp +986 -770
- package/src/llama.cpp/common/arg.h +22 -22
- package/src/llama.cpp/common/common.cpp +212 -351
- package/src/llama.cpp/common/common.h +204 -117
- package/src/llama.cpp/common/json-schema-to-grammar.cpp +1 -1
- package/src/llama.cpp/common/log.cpp +50 -50
- package/src/llama.cpp/common/log.h +18 -18
- package/src/llama.cpp/common/ngram-cache.cpp +36 -36
- package/src/llama.cpp/common/ngram-cache.h +19 -19
- package/src/llama.cpp/common/sampling.cpp +163 -121
- package/src/llama.cpp/common/sampling.h +41 -20
- package/src/llama.cpp/common/speculative.cpp +274 -0
- package/src/llama.cpp/common/speculative.h +28 -0
- package/src/llama.cpp/docs/build.md +134 -161
- package/src/llama.cpp/examples/CMakeLists.txt +33 -14
- package/src/llama.cpp/examples/batched/CMakeLists.txt +1 -1
- package/src/llama.cpp/examples/batched/batched.cpp +19 -18
- package/src/llama.cpp/examples/batched-bench/CMakeLists.txt +1 -1
- package/src/llama.cpp/examples/batched-bench/batched-bench.cpp +10 -11
- package/src/llama.cpp/examples/convert-llama2c-to-ggml/CMakeLists.txt +1 -1
- package/src/llama.cpp/examples/convert-llama2c-to-ggml/convert-llama2c-to-ggml.cpp +1 -1
- package/src/llama.cpp/examples/cvector-generator/CMakeLists.txt +1 -1
- package/src/llama.cpp/examples/cvector-generator/cvector-generator.cpp +9 -9
- package/src/llama.cpp/examples/deprecation-warning/deprecation-warning.cpp +1 -1
- package/src/llama.cpp/examples/embedding/CMakeLists.txt +1 -1
- package/src/llama.cpp/examples/embedding/embedding.cpp +12 -12
- package/src/llama.cpp/examples/eval-callback/CMakeLists.txt +3 -2
- package/src/llama.cpp/examples/eval-callback/eval-callback.cpp +8 -8
- package/src/llama.cpp/examples/export-lora/CMakeLists.txt +1 -1
- package/src/llama.cpp/examples/export-lora/export-lora.cpp +5 -5
- package/src/llama.cpp/examples/gbnf-validator/CMakeLists.txt +1 -1
- package/src/llama.cpp/examples/gbnf-validator/gbnf-validator.cpp +4 -7
- package/src/llama.cpp/examples/gen-docs/CMakeLists.txt +1 -1
- package/src/llama.cpp/examples/gen-docs/gen-docs.cpp +7 -7
- package/src/llama.cpp/examples/gguf/CMakeLists.txt +1 -1
- package/src/llama.cpp/examples/gguf-hash/CMakeLists.txt +8 -1
- package/src/llama.cpp/examples/gguf-split/CMakeLists.txt +1 -1
- package/src/llama.cpp/examples/gguf-split/gguf-split.cpp +2 -2
- package/src/llama.cpp/examples/gritlm/CMakeLists.txt +1 -1
- package/src/llama.cpp/examples/gritlm/gritlm.cpp +18 -18
- package/src/llama.cpp/examples/imatrix/CMakeLists.txt +1 -1
- package/src/llama.cpp/examples/imatrix/imatrix.cpp +31 -13
- package/src/llama.cpp/examples/infill/CMakeLists.txt +1 -1
- package/src/llama.cpp/examples/infill/infill.cpp +41 -87
- package/src/llama.cpp/examples/llama-bench/CMakeLists.txt +1 -1
- package/src/llama.cpp/examples/llama-bench/llama-bench.cpp +439 -459
- package/src/llama.cpp/examples/llama.android/llama/build.gradle.kts +2 -0
- package/src/llama.cpp/examples/llama.android/llama/src/main/cpp/llama-android.cpp +11 -14
- package/src/llama.cpp/examples/llava/CMakeLists.txt +10 -3
- package/src/llama.cpp/examples/llava/clip.cpp +263 -66
- package/src/llama.cpp/examples/llava/clip.h +8 -2
- package/src/llama.cpp/examples/llava/llava-cli.cpp +23 -23
- package/src/llama.cpp/examples/llava/llava.cpp +83 -22
- package/src/llama.cpp/examples/llava/minicpmv-cli.cpp +21 -21
- package/src/llama.cpp/examples/llava/qwen2vl-cli.cpp +581 -0
- package/src/llama.cpp/examples/lookahead/CMakeLists.txt +1 -1
- package/src/llama.cpp/examples/lookahead/lookahead.cpp +26 -26
- package/src/llama.cpp/examples/lookup/CMakeLists.txt +4 -4
- package/src/llama.cpp/examples/lookup/lookup-create.cpp +7 -7
- package/src/llama.cpp/examples/lookup/lookup-merge.cpp +4 -4
- package/src/llama.cpp/examples/lookup/lookup-stats.cpp +16 -15
- package/src/llama.cpp/examples/lookup/lookup.cpp +30 -30
- package/src/llama.cpp/examples/main/CMakeLists.txt +1 -1
- package/src/llama.cpp/examples/main/main.cpp +73 -114
- package/src/llama.cpp/examples/main-cmake-pkg/CMakeLists.txt +1 -1
- package/src/llama.cpp/examples/parallel/CMakeLists.txt +1 -1
- package/src/llama.cpp/examples/parallel/parallel.cpp +18 -19
- package/src/llama.cpp/examples/passkey/CMakeLists.txt +1 -1
- package/src/llama.cpp/examples/passkey/passkey.cpp +14 -14
- package/src/llama.cpp/examples/perplexity/CMakeLists.txt +1 -1
- package/src/llama.cpp/examples/perplexity/perplexity.cpp +99 -120
- package/src/llama.cpp/examples/quantize/CMakeLists.txt +1 -1
- package/src/llama.cpp/examples/quantize/quantize.cpp +0 -3
- package/src/llama.cpp/examples/quantize-stats/CMakeLists.txt +1 -1
- package/src/llama.cpp/examples/quantize-stats/quantize-stats.cpp +10 -9
- package/src/llama.cpp/examples/retrieval/CMakeLists.txt +1 -1
- package/src/llama.cpp/examples/retrieval/retrieval.cpp +16 -16
- package/src/llama.cpp/examples/rpc/rpc-server.cpp +3 -1
- package/src/llama.cpp/examples/run/CMakeLists.txt +5 -0
- package/src/llama.cpp/examples/run/run.cpp +911 -0
- package/src/llama.cpp/examples/save-load-state/CMakeLists.txt +1 -1
- package/src/llama.cpp/examples/save-load-state/save-load-state.cpp +38 -21
- package/src/llama.cpp/examples/server/CMakeLists.txt +3 -16
- package/src/llama.cpp/examples/server/server.cpp +2073 -1339
- package/src/llama.cpp/examples/server/tests/requirements.txt +2 -2
- package/src/llama.cpp/examples/server/utils.hpp +354 -277
- package/src/llama.cpp/examples/simple/CMakeLists.txt +2 -2
- package/src/llama.cpp/examples/simple/simple.cpp +130 -94
- package/src/llama.cpp/examples/simple-chat/CMakeLists.txt +5 -0
- package/src/llama.cpp/examples/simple-chat/simple-chat.cpp +200 -0
- package/src/llama.cpp/examples/speculative/CMakeLists.txt +1 -1
- package/src/llama.cpp/examples/speculative/speculative.cpp +68 -64
- package/src/llama.cpp/examples/speculative-simple/CMakeLists.txt +5 -0
- package/src/llama.cpp/examples/speculative-simple/speculative-simple.cpp +265 -0
- package/src/llama.cpp/examples/tokenize/CMakeLists.txt +1 -1
- package/src/llama.cpp/examples/tokenize/tokenize.cpp +3 -3
- package/src/llama.cpp/examples/tts/CMakeLists.txt +5 -0
- package/src/llama.cpp/examples/tts/tts.cpp +932 -0
- package/src/llama.cpp/ggml/CMakeLists.txt +54 -36
- package/src/llama.cpp/ggml/include/ggml-backend.h +63 -34
- package/src/llama.cpp/ggml/include/ggml-blas.h +5 -3
- package/src/llama.cpp/ggml/include/ggml-cann.h +9 -7
- package/src/llama.cpp/ggml/include/ggml-cpp.h +38 -0
- package/src/llama.cpp/ggml/include/ggml-cpu.h +135 -0
- package/src/llama.cpp/ggml/include/ggml-cuda.h +12 -12
- package/src/llama.cpp/ggml/include/ggml-kompute.h +7 -3
- package/src/llama.cpp/ggml/include/ggml-metal.h +11 -7
- package/src/llama.cpp/ggml/include/ggml-opencl.h +26 -0
- package/src/llama.cpp/ggml/include/ggml-opt.h +216 -0
- package/src/llama.cpp/ggml/include/ggml-rpc.h +9 -5
- package/src/llama.cpp/ggml/include/ggml-sycl.h +18 -11
- package/src/llama.cpp/ggml/include/ggml-vulkan.h +10 -8
- package/src/llama.cpp/ggml/include/ggml.h +159 -417
- package/src/llama.cpp/ggml/src/CMakeLists.txt +121 -1155
- package/src/llama.cpp/ggml/src/ggml-alloc.c +23 -28
- package/src/llama.cpp/ggml/src/ggml-backend-impl.h +57 -36
- package/src/llama.cpp/ggml/src/ggml-backend-reg.cpp +552 -0
- package/src/llama.cpp/ggml/src/ggml-backend.cpp +306 -867
- package/src/llama.cpp/ggml/src/ggml-blas/CMakeLists.txt +87 -0
- package/src/llama.cpp/ggml/src/{ggml-blas.cpp → ggml-blas/ggml-blas.cpp} +216 -65
- package/src/llama.cpp/ggml/src/ggml-cann/CMakeLists.txt +76 -0
- package/src/llama.cpp/ggml/src/ggml-cann/aclnn_ops.cpp +456 -111
- package/src/llama.cpp/ggml/src/ggml-cann/common.h +6 -3
- package/src/llama.cpp/ggml/src/{ggml-cann.cpp → ggml-cann/ggml-cann.cpp} +343 -177
- package/src/llama.cpp/ggml/src/ggml-cann/kernels/CMakeLists.txt +2 -5
- package/src/llama.cpp/ggml/src/ggml-cann/kernels/dup.cpp +22 -9
- package/src/llama.cpp/ggml/src/ggml-cann/kernels/get_row_f16.cpp +24 -13
- package/src/llama.cpp/ggml/src/ggml-cann/kernels/get_row_f32.cpp +23 -13
- package/src/llama.cpp/ggml/src/ggml-cann/kernels/get_row_q4_0.cpp +11 -0
- package/src/llama.cpp/ggml/src/ggml-cann/kernels/quantize_f16_q8_0.cpp +10 -0
- package/src/llama.cpp/ggml/src/ggml-cann/kernels/quantize_f32_q8_0.cpp +10 -0
- package/src/llama.cpp/ggml/src/ggml-cann/kernels/quantize_float_to_q4_0.cpp +17 -0
- package/src/llama.cpp/ggml/src/ggml-common.h +42 -42
- package/src/llama.cpp/ggml/src/ggml-cpu/CMakeLists.txt +336 -0
- package/src/llama.cpp/ggml/src/ggml-cpu/amx/amx.cpp +220 -0
- package/src/llama.cpp/ggml/src/ggml-cpu/amx/amx.h +8 -0
- package/src/llama.cpp/ggml/src/ggml-cpu/amx/common.h +91 -0
- package/src/llama.cpp/ggml/src/ggml-cpu/amx/mmq.cpp +2511 -0
- package/src/llama.cpp/ggml/src/ggml-cpu/amx/mmq.h +10 -0
- package/src/llama.cpp/ggml/src/ggml-cpu/cpu-feats-x86.cpp +323 -0
- package/src/llama.cpp/ggml/src/{ggml-aarch64.c → ggml-cpu/ggml-cpu-aarch64.cpp} +1299 -246
- package/src/llama.cpp/ggml/src/ggml-cpu/ggml-cpu-aarch64.h +8 -0
- package/src/llama.cpp/ggml/src/ggml-cpu/ggml-cpu-hbm.cpp +55 -0
- package/src/llama.cpp/ggml/src/ggml-cpu/ggml-cpu-hbm.h +8 -0
- package/src/llama.cpp/ggml/src/{ggml-cpu-impl.h → ggml-cpu/ggml-cpu-impl.h} +14 -242
- package/src/llama.cpp/ggml/src/ggml-cpu/ggml-cpu-quants.c +10835 -0
- package/src/llama.cpp/ggml/src/ggml-cpu/ggml-cpu-quants.h +63 -0
- package/src/llama.cpp/ggml/src/ggml-cpu/ggml-cpu-traits.cpp +36 -0
- package/src/llama.cpp/ggml/src/ggml-cpu/ggml-cpu-traits.h +38 -0
- package/src/llama.cpp/ggml/src/ggml-cpu/ggml-cpu.c +14123 -0
- package/src/llama.cpp/ggml/src/ggml-cpu/ggml-cpu.cpp +628 -0
- package/src/llama.cpp/ggml/src/{llamafile → ggml-cpu/llamafile}/sgemm.cpp +666 -0
- package/src/llama.cpp/ggml/src/ggml-cuda/CMakeLists.txt +152 -0
- package/src/llama.cpp/ggml/src/ggml-cuda/vendors/hip.h +8 -0
- package/src/llama.cpp/ggml/src/ggml-hip/CMakeLists.txt +104 -0
- package/src/llama.cpp/ggml/src/ggml-impl.h +393 -22
- package/src/llama.cpp/ggml/src/ggml-kompute/CMakeLists.txt +166 -0
- package/src/llama.cpp/ggml/src/{ggml-kompute.cpp → ggml-kompute/ggml-kompute.cpp} +360 -127
- package/src/llama.cpp/ggml/src/ggml-metal/CMakeLists.txt +105 -0
- package/src/llama.cpp/ggml/src/ggml-metal/ggml-metal-impl.h +288 -0
- package/src/llama.cpp/ggml/src/ggml-musa/CMakeLists.txt +107 -0
- package/src/llama.cpp/ggml/src/ggml-opencl/CMakeLists.txt +147 -0
- package/src/llama.cpp/ggml/src/ggml-opencl/ggml-opencl.cpp +4004 -0
- package/src/llama.cpp/ggml/src/ggml-opt.cpp +854 -0
- package/src/llama.cpp/ggml/src/ggml-quants.c +188 -10702
- package/src/llama.cpp/ggml/src/ggml-quants.h +78 -125
- package/src/llama.cpp/ggml/src/ggml-rpc/CMakeLists.txt +9 -0
- package/src/llama.cpp/ggml/src/{ggml-rpc.cpp → ggml-rpc/ggml-rpc.cpp} +478 -300
- package/src/llama.cpp/ggml/src/ggml-sycl/CMakeLists.txt +84 -0
- package/src/llama.cpp/ggml/src/ggml-sycl/backend.hpp +3 -0
- package/src/llama.cpp/ggml/src/ggml-sycl/common.cpp +36 -5
- package/src/llama.cpp/ggml/src/ggml-sycl/common.hpp +259 -0
- package/src/llama.cpp/ggml/src/ggml-sycl/concat.cpp +3 -2
- package/src/llama.cpp/ggml/src/ggml-sycl/convert.cpp +1 -1
- package/src/llama.cpp/ggml/src/ggml-sycl/dmmv.cpp +5 -5
- package/src/llama.cpp/ggml/src/ggml-sycl/dpct/helper.hpp +34 -35
- package/src/llama.cpp/ggml/src/ggml-sycl/element_wise.cpp +1030 -0
- package/src/llama.cpp/ggml/src/ggml-sycl/element_wise.hpp +76 -0
- package/src/llama.cpp/ggml/src/ggml-sycl/gemm.hpp +4 -4
- package/src/llama.cpp/ggml/src/{ggml-sycl.cpp → ggml-sycl/ggml-sycl.cpp} +3638 -4151
- package/src/llama.cpp/ggml/src/ggml-sycl/im2col.cpp +3 -2
- package/src/llama.cpp/ggml/src/ggml-sycl/mmq.cpp +6 -6
- package/src/llama.cpp/ggml/src/ggml-sycl/mmvq.cpp +75 -87
- package/src/llama.cpp/ggml/src/ggml-sycl/norm.cpp +7 -6
- package/src/llama.cpp/ggml/src/ggml-sycl/outprod.cpp +56 -0
- package/src/llama.cpp/ggml/src/ggml-sycl/outprod.hpp +11 -0
- package/src/llama.cpp/ggml/src/ggml-sycl/presets.hpp +6 -0
- package/src/llama.cpp/ggml/src/ggml-sycl/rope.cpp +4 -3
- package/src/llama.cpp/ggml/src/ggml-sycl/softmax.cpp +7 -7
- package/src/llama.cpp/ggml/src/ggml-sycl/tsembd.cpp +1 -0
- package/src/llama.cpp/ggml/src/ggml-sycl/vecdotq.hpp +4 -4
- package/src/llama.cpp/ggml/src/ggml-sycl/wkv6.cpp +141 -0
- package/src/llama.cpp/ggml/src/ggml-sycl/wkv6.hpp +10 -0
- package/src/llama.cpp/ggml/src/ggml-threading.cpp +12 -0
- package/src/llama.cpp/ggml/src/ggml-threading.h +14 -0
- package/src/llama.cpp/ggml/src/ggml-vulkan/CMakeLists.txt +92 -0
- package/src/llama.cpp/ggml/src/{ggml-vulkan.cpp → ggml-vulkan/ggml-vulkan.cpp} +2138 -887
- package/src/llama.cpp/ggml/src/{vulkan-shaders → ggml-vulkan/vulkan-shaders}/CMakeLists.txt +3 -1
- package/src/llama.cpp/ggml/src/ggml-vulkan/vulkan-shaders/vulkan-shaders-gen.cpp +593 -0
- package/src/llama.cpp/ggml/src/ggml.c +4427 -20125
- package/src/llama.cpp/include/llama-cpp.h +25 -0
- package/src/llama.cpp/include/llama.h +93 -52
- package/src/llama.cpp/models/ggml-vocab-roberta-bpe.gguf.inp +112 -0
- package/src/llama.cpp/models/ggml-vocab-roberta-bpe.gguf.out +46 -0
- package/src/llama.cpp/pocs/CMakeLists.txt +3 -1
- package/src/llama.cpp/pocs/vdot/CMakeLists.txt +2 -2
- package/src/llama.cpp/pocs/vdot/q8dot.cpp +4 -3
- package/src/llama.cpp/pocs/vdot/vdot.cpp +8 -7
- package/src/llama.cpp/src/CMakeLists.txt +4 -8
- package/src/llama.cpp/src/llama-grammar.cpp +15 -15
- package/src/llama.cpp/src/llama-grammar.h +2 -5
- package/src/llama.cpp/src/llama-sampling.cpp +779 -194
- package/src/llama.cpp/src/llama-sampling.h +21 -2
- package/src/llama.cpp/src/llama-vocab.cpp +55 -10
- package/src/llama.cpp/src/llama-vocab.h +35 -11
- package/src/llama.cpp/src/llama.cpp +4317 -2979
- package/src/llama.cpp/src/unicode-data.cpp +2 -2
- package/src/llama.cpp/src/unicode.cpp +62 -51
- package/src/llama.cpp/src/unicode.h +9 -10
- package/src/llama.cpp/tests/CMakeLists.txt +48 -38
- package/src/llama.cpp/tests/test-arg-parser.cpp +15 -15
- package/src/llama.cpp/tests/test-backend-ops.cpp +324 -80
- package/src/llama.cpp/tests/test-barrier.cpp +1 -0
- package/src/llama.cpp/tests/test-chat-template.cpp +59 -9
- package/src/llama.cpp/tests/test-gguf.cpp +1303 -0
- package/src/llama.cpp/tests/test-grammar-integration.cpp +3 -6
- package/src/llama.cpp/tests/test-json-schema-to-grammar.cpp +17 -4
- package/src/llama.cpp/tests/test-llama-grammar.cpp +2 -4
- package/src/llama.cpp/tests/test-log.cpp +2 -2
- package/src/llama.cpp/tests/test-opt.cpp +853 -142
- package/src/llama.cpp/tests/test-quantize-fns.cpp +24 -21
- package/src/llama.cpp/tests/test-quantize-perf.cpp +16 -14
- package/src/llama.cpp/tests/test-rope.cpp +62 -20
- package/src/llama.cpp/tests/test-sampling.cpp +163 -138
- package/src/llama.cpp/tests/test-tokenizer-0.cpp +7 -7
- package/src/llama.cpp/tests/test-tokenizer-1-bpe.cpp +5 -5
- package/src/llama.cpp/tests/test-tokenizer-1-spm.cpp +5 -5
- package/src/llama.cpp/.github/workflows/nix-ci-aarch64.yml +0 -72
- package/src/llama.cpp/.github/workflows/nix-ci.yml +0 -79
- package/src/llama.cpp/.github/workflows/nix-flake-update.yml +0 -22
- package/src/llama.cpp/.github/workflows/nix-publish-flake.yml +0 -36
- package/src/llama.cpp/common/train.cpp +0 -1515
- package/src/llama.cpp/common/train.h +0 -233
- package/src/llama.cpp/examples/baby-llama/CMakeLists.txt +0 -5
- package/src/llama.cpp/examples/baby-llama/baby-llama.cpp +0 -1639
- package/src/llama.cpp/ggml/src/ggml-aarch64.h +0 -39
- package/src/llama.cpp/ggml/src/vulkan-shaders/vulkan-shaders-gen.cpp +0 -600
- package/src/llama.cpp/tests/test-grad0.cpp +0 -1683
- /package/src/llama.cpp/ggml/{cmake → src/ggml-cpu/cmake}/FindSIMD.cmake +0 -0
- /package/src/llama.cpp/ggml/src/{llamafile → ggml-cpu/llamafile}/sgemm.h +0 -0
|
@@ -0,0 +1,336 @@
|
|
|
1
|
+
function(ggml_add_cpu_backend_variant_impl tag_name)
|
|
2
|
+
if (tag_name)
|
|
3
|
+
set(GGML_CPU_NAME ggml-cpu-${tag_name})
|
|
4
|
+
else()
|
|
5
|
+
set(GGML_CPU_NAME ggml-cpu)
|
|
6
|
+
endif()
|
|
7
|
+
|
|
8
|
+
ggml_add_backend_library(${GGML_CPU_NAME})
|
|
9
|
+
|
|
10
|
+
list (APPEND GGML_CPU_SOURCES
|
|
11
|
+
ggml-cpu/ggml-cpu.c
|
|
12
|
+
ggml-cpu/ggml-cpu.cpp
|
|
13
|
+
ggml-cpu/ggml-cpu-aarch64.cpp
|
|
14
|
+
ggml-cpu/ggml-cpu-aarch64.h
|
|
15
|
+
ggml-cpu/ggml-cpu-hbm.cpp
|
|
16
|
+
ggml-cpu/ggml-cpu-hbm.h
|
|
17
|
+
ggml-cpu/ggml-cpu-quants.c
|
|
18
|
+
ggml-cpu/ggml-cpu-quants.h
|
|
19
|
+
ggml-cpu/ggml-cpu-traits.cpp
|
|
20
|
+
ggml-cpu/ggml-cpu-traits.h
|
|
21
|
+
ggml-cpu/amx/amx.cpp
|
|
22
|
+
ggml-cpu/amx/amx.h
|
|
23
|
+
ggml-cpu/amx/mmq.cpp
|
|
24
|
+
ggml-cpu/amx/mmq.h
|
|
25
|
+
ggml-cpu/ggml-cpu-impl.h
|
|
26
|
+
)
|
|
27
|
+
|
|
28
|
+
target_compile_features(${GGML_CPU_NAME} PRIVATE c_std_11 cxx_std_17)
|
|
29
|
+
target_include_directories(${GGML_CPU_NAME} PRIVATE . ggml-cpu)
|
|
30
|
+
|
|
31
|
+
if (APPLE AND GGML_ACCELERATE)
|
|
32
|
+
find_library(ACCELERATE_FRAMEWORK Accelerate)
|
|
33
|
+
if (ACCELERATE_FRAMEWORK)
|
|
34
|
+
message(STATUS "Accelerate framework found")
|
|
35
|
+
|
|
36
|
+
target_compile_definitions(${GGML_CPU_NAME} PRIVATE GGML_USE_ACCELERATE)
|
|
37
|
+
target_compile_definitions(${GGML_CPU_NAME} PRIVATE ACCELERATE_NEW_LAPACK)
|
|
38
|
+
target_compile_definitions(${GGML_CPU_NAME} PRIVATE ACCELERATE_LAPACK_ILP64)
|
|
39
|
+
|
|
40
|
+
target_link_libraries(${GGML_CPU_NAME} PRIVATE ${ACCELERATE_FRAMEWORK})
|
|
41
|
+
else()
|
|
42
|
+
message(WARNING "Accelerate framework not found")
|
|
43
|
+
endif()
|
|
44
|
+
endif()
|
|
45
|
+
|
|
46
|
+
if (GGML_OPENMP)
|
|
47
|
+
find_package(OpenMP)
|
|
48
|
+
if (OpenMP_FOUND)
|
|
49
|
+
target_compile_definitions(${GGML_CPU_NAME} PRIVATE GGML_USE_OPENMP)
|
|
50
|
+
|
|
51
|
+
target_link_libraries(${GGML_CPU_NAME} PRIVATE OpenMP::OpenMP_C OpenMP::OpenMP_CXX)
|
|
52
|
+
else()
|
|
53
|
+
message(WARNING "OpenMP not found")
|
|
54
|
+
endif()
|
|
55
|
+
endif()
|
|
56
|
+
|
|
57
|
+
if (GGML_LLAMAFILE)
|
|
58
|
+
target_compile_definitions(${GGML_CPU_NAME} PRIVATE GGML_USE_LLAMAFILE)
|
|
59
|
+
|
|
60
|
+
list(APPEND GGML_CPU_SOURCES
|
|
61
|
+
ggml-cpu/llamafile/sgemm.cpp
|
|
62
|
+
ggml-cpu/llamafile/sgemm.h)
|
|
63
|
+
endif()
|
|
64
|
+
|
|
65
|
+
if (GGML_CPU_HBM)
|
|
66
|
+
find_library(memkind memkind REQUIRED)
|
|
67
|
+
|
|
68
|
+
message(STATUS "Using memkind for CPU HBM")
|
|
69
|
+
|
|
70
|
+
target_compile_definitions(${GGML_CPU_NAME} PRIVATE GGML_USE_CPU_HBM)
|
|
71
|
+
|
|
72
|
+
target_link_libraries(${GGML_CPU_NAME} PUBLIC memkind)
|
|
73
|
+
endif()
|
|
74
|
+
|
|
75
|
+
if (CMAKE_OSX_ARCHITECTURES STREQUAL "arm64" OR
|
|
76
|
+
CMAKE_GENERATOR_PLATFORM_LWR STREQUAL "arm64" OR
|
|
77
|
+
(NOT CMAKE_OSX_ARCHITECTURES AND NOT CMAKE_GENERATOR_PLATFORM_LWR AND
|
|
78
|
+
CMAKE_SYSTEM_PROCESSOR MATCHES "^(aarch64|arm.*|ARM64)$"))
|
|
79
|
+
|
|
80
|
+
message(STATUS "ARM detected")
|
|
81
|
+
|
|
82
|
+
if (MSVC AND NOT CMAKE_C_COMPILER_ID STREQUAL "Clang")
|
|
83
|
+
message(FATAL_ERROR "MSVC is not supported for ARM, use clang")
|
|
84
|
+
else()
|
|
85
|
+
check_cxx_compiler_flag(-mfp16-format=ieee COMPILER_SUPPORTS_FP16_FORMAT_I3E)
|
|
86
|
+
if (NOT "${COMPILER_SUPPORTS_FP16_FORMAT_I3E}" STREQUAL "")
|
|
87
|
+
list(APPEND ARCH_FLAGS -mfp16-format=ieee)
|
|
88
|
+
endif()
|
|
89
|
+
|
|
90
|
+
if (GGML_NATIVE)
|
|
91
|
+
# -mcpu=native does not always enable all the features in some compilers,
|
|
92
|
+
# so we check for them manually and enable them if available
|
|
93
|
+
|
|
94
|
+
execute_process(
|
|
95
|
+
COMMAND ${CMAKE_C_COMPILER} -mcpu=native -E -v -
|
|
96
|
+
INPUT_FILE "/dev/null"
|
|
97
|
+
OUTPUT_QUIET
|
|
98
|
+
ERROR_VARIABLE ARM_MCPU
|
|
99
|
+
RESULT_VARIABLE ARM_MCPU_RESULT
|
|
100
|
+
)
|
|
101
|
+
if (NOT ARM_MCPU_RESULT)
|
|
102
|
+
string(REGEX MATCH "-mcpu=[^ ']+" ARM_MCPU_FLAG "${ARM_MCPU}")
|
|
103
|
+
endif()
|
|
104
|
+
if ("${ARM_MCPU_FLAG}" STREQUAL "")
|
|
105
|
+
set(ARM_MCPU_FLAG -mcpu=native)
|
|
106
|
+
message(STATUS "ARM -mcpu not found, -mcpu=native will be used")
|
|
107
|
+
endif()
|
|
108
|
+
|
|
109
|
+
set(CMAKE_REQUIRED_FLAGS_SAVE ${CMAKE_REQUIRED_FLAGS})
|
|
110
|
+
include(CheckCXXSourceRuns)
|
|
111
|
+
|
|
112
|
+
set(CMAKE_REQUIRED_FLAGS "${ARM_MCPU_FLAG}+dotprod")
|
|
113
|
+
check_cxx_source_runs(
|
|
114
|
+
"#include <arm_neon.h>\nint main() { int8x16_t _a, _b; int32x4_t _s = vdotq_s32(_s, _a, _b); return 0; }"
|
|
115
|
+
GGML_COMPILER_SUPPORT_DOTPROD)
|
|
116
|
+
if (GGML_COMPILER_SUPPORT_DOTPROD)
|
|
117
|
+
set(ARM_MCPU_FLAG_FIX "${ARM_MCPU_FLAG_FIX}+dotprod")
|
|
118
|
+
endif()
|
|
119
|
+
|
|
120
|
+
set(CMAKE_REQUIRED_FLAGS "${ARM_MCPU_FLAG}+i8mm")
|
|
121
|
+
check_cxx_source_runs(
|
|
122
|
+
"#include <arm_neon.h>\nint main() { int8x16_t _a, _b; int32x4_t _s = vmmlaq_s32(_s, _a, _b); return 0; }"
|
|
123
|
+
GGML_COMPILER_SUPPORT_I8MM)
|
|
124
|
+
if (GGML_COMPILER_SUPPORT_I8MM)
|
|
125
|
+
set(ARM_MCPU_FLAG_FIX "${ARM_MCPU_FLAG_FIX}+i8mm")
|
|
126
|
+
endif()
|
|
127
|
+
|
|
128
|
+
set(CMAKE_REQUIRED_FLAGS ${CMAKE_REQUIRED_FLAGS_SAVE})
|
|
129
|
+
list(APPEND ARCH_FLAGS "${ARM_MCPU_FLAG}${ARM_MCPU_FLAG_FIX}")
|
|
130
|
+
|
|
131
|
+
else()
|
|
132
|
+
if (GGML_CPU_ARM_ARCH)
|
|
133
|
+
list(APPEND ARCH_FLAGS -march=${GGML_CPU_ARM_ARCH})
|
|
134
|
+
endif()
|
|
135
|
+
endif()
|
|
136
|
+
|
|
137
|
+
# show enabled features
|
|
138
|
+
execute_process(
|
|
139
|
+
COMMAND ${CMAKE_C_COMPILER} ${ARCH_FLAGS} -dM -E -
|
|
140
|
+
INPUT_FILE "/dev/null"
|
|
141
|
+
OUTPUT_VARIABLE ARM_FEATURE
|
|
142
|
+
RESULT_VARIABLE ARM_FEATURE_RESULT
|
|
143
|
+
)
|
|
144
|
+
if (ARM_FEATURE_RESULT)
|
|
145
|
+
message(FATAL_ERROR "Failed to get ARM features")
|
|
146
|
+
else()
|
|
147
|
+
foreach(feature DOTPROD SVE MATMUL_INT8 FMA FP16_VECTOR_ARITHMETIC)
|
|
148
|
+
string(FIND "${ARM_FEATURE}" "__ARM_FEATURE_${feature} 1" feature_pos)
|
|
149
|
+
if (NOT ${feature_pos} EQUAL -1)
|
|
150
|
+
message(STATUS "ARM feature ${feature} enabled")
|
|
151
|
+
endif()
|
|
152
|
+
endforeach()
|
|
153
|
+
endif()
|
|
154
|
+
endif()
|
|
155
|
+
elseif (CMAKE_OSX_ARCHITECTURES STREQUAL "x86_64" OR CMAKE_GENERATOR_PLATFORM_LWR MATCHES "^(x86_64|i686|amd64|x64|win32)$" OR
|
|
156
|
+
(NOT CMAKE_OSX_ARCHITECTURES AND NOT CMAKE_GENERATOR_PLATFORM_LWR AND
|
|
157
|
+
CMAKE_SYSTEM_PROCESSOR MATCHES "^(x86_64|i686|AMD64|amd64)$"))
|
|
158
|
+
|
|
159
|
+
message(STATUS "x86 detected")
|
|
160
|
+
|
|
161
|
+
if (MSVC)
|
|
162
|
+
# instruction set detection for MSVC only
|
|
163
|
+
if (GGML_NATIVE)
|
|
164
|
+
include(ggml-cpu/cmake/FindSIMD.cmake)
|
|
165
|
+
endif ()
|
|
166
|
+
if (GGML_AVX512)
|
|
167
|
+
list(APPEND ARCH_FLAGS /arch:AVX512)
|
|
168
|
+
# /arch:AVX512 includes: __AVX512F__, __AVX512CD__, __AVX512BW__, __AVX512DQ__, and __AVX512VL__
|
|
169
|
+
# MSVC has no compile-time flags enabling specific
|
|
170
|
+
# AVX512 extensions, neither it defines the
|
|
171
|
+
# macros corresponding to the extensions.
|
|
172
|
+
# Do it manually.
|
|
173
|
+
list(APPEND ARCH_DEFINITIONS GGML_AVX512)
|
|
174
|
+
if (GGML_AVX512_VBMI)
|
|
175
|
+
list(APPEND ARCH_DEFINITIONS __AVX512VBMI__)
|
|
176
|
+
if (CMAKE_C_COMPILER_ID STREQUAL "Clang")
|
|
177
|
+
list(APPEND ARCH_FLAGS -mavx512vbmi)
|
|
178
|
+
endif()
|
|
179
|
+
endif()
|
|
180
|
+
if (GGML_AVX512_VNNI)
|
|
181
|
+
list(APPEND ARCH_DEFINITIONS __AVX512VNNI__ GGML_AVX512_VNNI)
|
|
182
|
+
if (CMAKE_C_COMPILER_ID STREQUAL "Clang")
|
|
183
|
+
list(APPEND ARCH_FLAGS -mavx512vnni)
|
|
184
|
+
endif()
|
|
185
|
+
endif()
|
|
186
|
+
if (GGML_AVX512_BF16)
|
|
187
|
+
list(APPEND ARCH_DEFINITIONS __AVX512BF16__ GGML_AVX512_BF16)
|
|
188
|
+
if (CMAKE_C_COMPILER_ID STREQUAL "Clang")
|
|
189
|
+
list(APPEND ARCH_FLAGS -mavx512bf16)
|
|
190
|
+
endif()
|
|
191
|
+
endif()
|
|
192
|
+
if (GGML_AMX_TILE)
|
|
193
|
+
list(APPEND ARCH_DEFINITIONS __AMX_TILE__ GGML_AMX_TILE)
|
|
194
|
+
endif()
|
|
195
|
+
if (GGML_AMX_INT8)
|
|
196
|
+
list(APPEND ARCH_DEFINITIONS __AMX_INT8__ GGML_AMX_INT8)
|
|
197
|
+
endif()
|
|
198
|
+
if (GGML_AMX_BF16)
|
|
199
|
+
list(APPEND ARCH_DEFINITIONS __AMX_BF16__ GGML_AMX_BF16)
|
|
200
|
+
endif()
|
|
201
|
+
elseif (GGML_AVX2)
|
|
202
|
+
list(APPEND ARCH_FLAGS /arch:AVX2)
|
|
203
|
+
list(APPEND ARCH_DEFINITIONS GGML_AVX2 GGML_FMA GGML_F16C)
|
|
204
|
+
elseif (GGML_AVX)
|
|
205
|
+
list(APPEND ARCH_FLAGS /arch:AVX)
|
|
206
|
+
list(APPEND ARCH_DEFINITIONS GGML_AVX)
|
|
207
|
+
else ()
|
|
208
|
+
list(APPEND ARCH_FLAGS /arch:SSE4.2)
|
|
209
|
+
list(APPEND ARCH_DEFINITIONS GGML_SSE42)
|
|
210
|
+
endif()
|
|
211
|
+
if (GGML_AVX_VNNI)
|
|
212
|
+
# MSVC generates AVX512 with AVX-VNNI intrinsics even with /arch:AVX2
|
|
213
|
+
#list(APPEND ARCH_DEFINITIONS __AVXVNNI__ GGML_AVX_VNNI)
|
|
214
|
+
endif()
|
|
215
|
+
else ()
|
|
216
|
+
if (GGML_NATIVE)
|
|
217
|
+
list(APPEND ARCH_FLAGS -march=native)
|
|
218
|
+
else ()
|
|
219
|
+
list(APPEND ARCH_FLAGS -msse4.2)
|
|
220
|
+
list(APPEND ARCH_DEFINITIONS GGML_SSE42)
|
|
221
|
+
if (GGML_F16C)
|
|
222
|
+
list(APPEND ARCH_FLAGS -mf16c)
|
|
223
|
+
list(APPEND ARCH_DEFINITIONS GGML_F16C)
|
|
224
|
+
endif()
|
|
225
|
+
if (GGML_FMA)
|
|
226
|
+
list(APPEND ARCH_FLAGS -mfma)
|
|
227
|
+
list(APPEND ARCH_DEFINITIONS GGML_FMA)
|
|
228
|
+
endif()
|
|
229
|
+
if (GGML_AVX)
|
|
230
|
+
list(APPEND ARCH_FLAGS -mavx)
|
|
231
|
+
list(APPEND ARCH_DEFINITIONS GGML_AVX)
|
|
232
|
+
endif()
|
|
233
|
+
if (GGML_AVX2)
|
|
234
|
+
list(APPEND ARCH_FLAGS -mavx2)
|
|
235
|
+
list(APPEND ARCH_DEFINITIONS GGML_AVX2)
|
|
236
|
+
endif()
|
|
237
|
+
if (GGML_AVX_VNNI)
|
|
238
|
+
list(APPEND ARCH_FLAGS -mavxvnni)
|
|
239
|
+
list(APPEND ARCH_DEFINITIONS GGML_AVX_VNNI)
|
|
240
|
+
endif()
|
|
241
|
+
if (GGML_AVX512)
|
|
242
|
+
list(APPEND ARCH_FLAGS -mavx512f)
|
|
243
|
+
list(APPEND ARCH_FLAGS -mavx512cd)
|
|
244
|
+
list(APPEND ARCH_FLAGS -mavx512vl)
|
|
245
|
+
list(APPEND ARCH_FLAGS -mavx512dq)
|
|
246
|
+
list(APPEND ARCH_FLAGS -mavx512bw)
|
|
247
|
+
list(APPEND ARCH_DEFINITIONS GGML_AVX512)
|
|
248
|
+
endif()
|
|
249
|
+
if (GGML_AVX512_VBMI)
|
|
250
|
+
list(APPEND ARCH_FLAGS -mavx512vbmi)
|
|
251
|
+
list(APPEND ARCH_DEFINITIONS GGML_AVX512_VBMI)
|
|
252
|
+
endif()
|
|
253
|
+
if (GGML_AVX512_VNNI)
|
|
254
|
+
list(APPEND ARCH_FLAGS -mavx512vnni)
|
|
255
|
+
list(APPEND ARCH_DEFINITIONS GGML_AVX512_VNNI)
|
|
256
|
+
endif()
|
|
257
|
+
if (GGML_AVX512_BF16)
|
|
258
|
+
list(APPEND ARCH_FLAGS -mavx512bf16)
|
|
259
|
+
list(APPEND ARCH_DEFINITIONS GGML_AVX512_BF16)
|
|
260
|
+
endif()
|
|
261
|
+
if (GGML_AMX_TILE)
|
|
262
|
+
list(APPEND ARCH_FLAGS -mamx-tile)
|
|
263
|
+
list(APPEND ARCH_DEFINITIONS GGML_AMX_TILE)
|
|
264
|
+
endif()
|
|
265
|
+
if (GGML_AMX_INT8)
|
|
266
|
+
list(APPEND ARCH_FLAGS -mamx-int8)
|
|
267
|
+
list(APPEND ARCH_DEFINITIONS GGML_AMX_INT8)
|
|
268
|
+
endif()
|
|
269
|
+
if (GGML_AMX_BF16)
|
|
270
|
+
list(APPEND ARCH_FLAGS -mamx-bf16)
|
|
271
|
+
list(APPEND ARCH_DEFINITIONS GGML_AMX_BF16)
|
|
272
|
+
endif()
|
|
273
|
+
endif()
|
|
274
|
+
endif()
|
|
275
|
+
elseif (${CMAKE_SYSTEM_PROCESSOR} MATCHES "ppc64")
|
|
276
|
+
message(STATUS "PowerPC detected")
|
|
277
|
+
execute_process(COMMAND bash -c "grep POWER10 /proc/cpuinfo | head -n 1" OUTPUT_VARIABLE POWER10_M)
|
|
278
|
+
string(FIND "${POWER10_M}" "POWER10" substring_index)
|
|
279
|
+
if (NOT DEFINED substring_index OR "${substring_index}" STREQUAL "")
|
|
280
|
+
set(substring_index -1)
|
|
281
|
+
endif()
|
|
282
|
+
|
|
283
|
+
if (${substring_index} GREATER_EQUAL 0)
|
|
284
|
+
list(APPEND ARCH_FLAGS -mcpu=power10)
|
|
285
|
+
elseif (${CMAKE_SYSTEM_PROCESSOR} MATCHES "ppc64le")
|
|
286
|
+
list(APPEND ARCH_FLAGS -mcpu=powerpc64le)
|
|
287
|
+
else()
|
|
288
|
+
list(APPEND ARCH_FLAGS -mcpu=native -mtune=native)
|
|
289
|
+
# TODO: Add targets for Power8/Power9 (Altivec/VSX) and Power10(MMA) and query for big endian systems (ppc64/le/be)
|
|
290
|
+
endif()
|
|
291
|
+
elseif (${CMAKE_SYSTEM_PROCESSOR} MATCHES "loongarch64")
|
|
292
|
+
message(STATUS "loongarch64 detected")
|
|
293
|
+
|
|
294
|
+
list(APPEND ARCH_FLAGS -march=loongarch64)
|
|
295
|
+
if (GGML_LASX)
|
|
296
|
+
list(APPEND ARCH_FLAGS -mlasx)
|
|
297
|
+
endif()
|
|
298
|
+
if (GGML_LSX)
|
|
299
|
+
list(APPEND ARCH_FLAGS -mlsx)
|
|
300
|
+
endif()
|
|
301
|
+
elseif (${CMAKE_SYSTEM_PROCESSOR} MATCHES "riscv64")
|
|
302
|
+
message(STATUS "RISC-V detected")
|
|
303
|
+
if (GGML_RVV)
|
|
304
|
+
list(APPEND ARCH_FLAGS -march=rv64gcv -mabi=lp64d)
|
|
305
|
+
endif()
|
|
306
|
+
else()
|
|
307
|
+
message(STATUS "Unknown architecture")
|
|
308
|
+
endif()
|
|
309
|
+
|
|
310
|
+
if (GGML_CPU_AARCH64)
|
|
311
|
+
target_compile_definitions(${GGML_CPU_NAME} PRIVATE GGML_USE_CPU_AARCH64)
|
|
312
|
+
endif()
|
|
313
|
+
|
|
314
|
+
message(STATUS "Adding CPU backend variant ${GGML_CPU_NAME}: ${ARCH_FLAGS} ${ARCH_DEFINITIONS}")
|
|
315
|
+
target_sources(${GGML_CPU_NAME} PRIVATE ${GGML_CPU_SOURCES})
|
|
316
|
+
target_compile_options(${GGML_CPU_NAME} PRIVATE ${ARCH_FLAGS})
|
|
317
|
+
target_compile_definitions(${GGML_CPU_NAME} PRIVATE ${ARCH_DEFINITIONS})
|
|
318
|
+
|
|
319
|
+
if (GGML_BACKEND_DL)
|
|
320
|
+
# The feature detection code is compiled as a separate target so that
|
|
321
|
+
# it can be built without the architecture flags
|
|
322
|
+
# Since multiple variants of the CPU backend may be included in the same
|
|
323
|
+
# build, using set_source_files_properties() to set the arch flags is not possible
|
|
324
|
+
set(GGML_CPU_FEATS_NAME ${GGML_CPU_NAME}-feats)
|
|
325
|
+
add_library(${GGML_CPU_FEATS_NAME} OBJECT ggml-cpu/cpu-feats-x86.cpp)
|
|
326
|
+
target_include_directories(${GGML_CPU_FEATS_NAME} PRIVATE . .. ../include)
|
|
327
|
+
target_compile_definitions(${GGML_CPU_FEATS_NAME} PRIVATE ${ARCH_DEFINITIONS})
|
|
328
|
+
target_compile_definitions(${GGML_CPU_FEATS_NAME} PRIVATE GGML_BACKEND_DL GGML_BACKEND_BUILD GGML_BACKEND_SHARED)
|
|
329
|
+
set_target_properties(${GGML_CPU_FEATS_NAME} PROPERTIES POSITION_INDEPENDENT_CODE ON)
|
|
330
|
+
target_link_libraries(${GGML_CPU_NAME} PRIVATE ${GGML_CPU_FEATS_NAME})
|
|
331
|
+
endif()
|
|
332
|
+
|
|
333
|
+
if (EMSCRIPTEN)
|
|
334
|
+
set_target_properties(${GGML_CPU_NAME} PROPERTIES COMPILE_FLAGS "-msimd128")
|
|
335
|
+
endif()
|
|
336
|
+
endfunction()
|
|
@@ -0,0 +1,220 @@
|
|
|
1
|
+
#include "amx.h"
|
|
2
|
+
#include "common.h"
|
|
3
|
+
#include "mmq.h"
|
|
4
|
+
#include "ggml-backend-impl.h"
|
|
5
|
+
#include "ggml-backend.h"
|
|
6
|
+
#include "ggml-impl.h"
|
|
7
|
+
#include "ggml-cpu.h"
|
|
8
|
+
#include "ggml-cpu-traits.h"
|
|
9
|
+
|
|
10
|
+
#if defined(__gnu_linux__)
|
|
11
|
+
#include <sys/syscall.h>
|
|
12
|
+
#include <unistd.h>
|
|
13
|
+
#endif
|
|
14
|
+
|
|
15
|
+
#include <cstdlib>
|
|
16
|
+
#include <cstring>
|
|
17
|
+
#include <memory>
|
|
18
|
+
|
|
19
|
+
#if defined(__AMX_INT8__) && defined(__AVX512VNNI__)
|
|
20
|
+
|
|
21
|
+
// AMX tensor traits: routes matmul work for AMX-resident tensors.
namespace ggml::cpu::amx {
class tensor_traits : public ggml::cpu::tensor_traits {
    // Report the scratch buffer size the AMX matmul kernel wants for this op.
    bool work_size(int /* n_threads */, const struct ggml_tensor * op, size_t & size) override {
        size = ggml_backend_amx_desired_wsize(op);
        return true;
    }

    // Dispatch supported ops (currently only MUL_MAT) to the AMX kernel;
    // returning false lets the generic CPU path handle the op.
    bool compute_forward(struct ggml_compute_params * params, struct ggml_tensor * op) override {
        if (op->op == GGML_OP_MUL_MAT) {
            ggml_backend_amx_mul_mat(params, op);
            return true;
        }
        return false;
    }
};

// The traits object is stateless, so a single shared instance serves every
// tensor placed in an AMX buffer (both parameters are intentionally unused).
static ggml::cpu::tensor_traits * get_tensor_traits(ggml_backend_buffer_t, struct ggml_tensor *) {
    static tensor_traits traits;
    return &traits;
}
}  // namespace ggml::cpu::amx
|
|
43
|
+
|
|
44
|
+
// AMX buffer interface
|
|
45
|
+
// Release the allocation backing an AMX buffer.
// The memory comes from ggml_aligned_malloc() (see
// ggml_backend_amx_buffer_type_alloc_buffer), so it must be released with
// ggml_aligned_free() rather than plain free(): on Windows ggml's aligned
// allocator uses _aligned_malloc, and passing that pointer to free() is
// undefined behavior.
static void ggml_backend_amx_buffer_free_buffer(ggml_backend_buffer_t buffer) {
    ggml_aligned_free(buffer->context, buffer->size);
}
|
|
48
|
+
|
|
49
|
+
// Base address of the buffer: the context pointer IS the raw allocation.
static void * ggml_backend_amx_buffer_get_base(ggml_backend_buffer_t buffer) {
    return (void *) (buffer->context);
}
|
|
52
|
+
|
|
53
|
+
// Attach the shared AMX tensor-traits object to every tensor placed in this
// buffer, so the CPU backend dispatches its matmuls to the AMX kernels.
static void ggml_backend_amx_buffer_init_tensor(ggml_backend_buffer_t buffer, struct ggml_tensor * tensor) {
    tensor->extra = (void *) ggml::cpu::amx::get_tensor_traits(buffer, tensor);

    GGML_UNUSED(buffer);
}
|
|
58
|
+
|
|
59
|
+
// Fill [offset, offset + size) of the tensor's data with `value`.
// NOTE(review): this writes the bytes directly, without any AMX repacking —
// presumably only used on tensors with a plain layout; confirm with callers.
static void ggml_backend_amx_buffer_memset_tensor(ggml_backend_buffer_t buffer, struct ggml_tensor * tensor,
                                                  uint8_t value, size_t offset, size_t size) {
    memset((char *) tensor->data + offset, value, size);

    GGML_UNUSED(buffer);
}
|
|
65
|
+
|
|
66
|
+
// Upload tensor data into the buffer. Types with AMX kernels are repacked
// into the AMX weight layout via ggml_backend_amx_convert_weight on the way
// in; all other types are a plain byte copy.
static void ggml_backend_amx_buffer_set_tensor(ggml_backend_buffer_t buffer, struct ggml_tensor * tensor,
                                               const void * data, size_t offset, size_t size) {
    if (qtype_has_amx_kernels(tensor->type)) {
        GGML_LOG_DEBUG("%s: amx repack tensor %s of type %s\n", __func__, tensor->name, ggml_type_name(tensor->type));
        ggml_backend_amx_convert_weight(tensor, data, offset, size);
    } else {
        memcpy((char *) tensor->data + offset, data, size);
    }

    GGML_UNUSED(buffer);
}
|
|
77
|
+
|
|
78
|
+
/*
|
|
79
|
+
// need to figure what we need to do with buffer->extra.
|
|
80
|
+
static void ggml_backend_amx_buffer_get_tensor(ggml_backend_buffer_t buffer, const struct ggml_tensor * tensor, void * data, size_t offset, size_t size) {
|
|
81
|
+
GGML_ASSERT(!qtype_has_amx_kernels(tensor->type));
|
|
82
|
+
memcpy(data, (const char *)tensor->data + offset, size);
|
|
83
|
+
|
|
84
|
+
GGML_UNUSED(buffer);
|
|
85
|
+
}
|
|
86
|
+
|
|
87
|
+
static bool ggml_backend_amx_buffer_cpy_tensor(ggml_backend_buffer_t buffer, const struct ggml_tensor * src, struct ggml_tensor * dst) {
|
|
88
|
+
if (ggml_backend_buffer_is_host(src->buffer)) {
|
|
89
|
+
if (qtype_has_amx_kernels(src->type)) {
|
|
90
|
+
ggml_backend_amx_convert_weight(dst, src->data, 0, ggml_nbytes(dst));
|
|
91
|
+
} else {
|
|
92
|
+
memcpy(dst->data, src->data, ggml_nbytes(src));
|
|
93
|
+
}
|
|
94
|
+
return true;
|
|
95
|
+
}
|
|
96
|
+
return false;
|
|
97
|
+
|
|
98
|
+
GGML_UNUSED(buffer);
|
|
99
|
+
}
|
|
100
|
+
*/
|
|
101
|
+
|
|
102
|
+
// Set every byte of the whole buffer to `value`.
static void ggml_backend_amx_buffer_clear(ggml_backend_buffer_t buffer, uint8_t value) {
    memset(buffer->context, value, buffer->size);
}
|
|
105
|
+
|
|
106
|
+
// Buffer vtable for AMX buffers. get_tensor and cpy_tensor are nullptr:
// reading back or copying repacked weights is not supported yet (see the
// commented-out implementations above).
static ggml_backend_buffer_i ggml_backend_amx_buffer_interface = {
    /* .free_buffer     = */ ggml_backend_amx_buffer_free_buffer,
    /* .get_base        = */ ggml_backend_amx_buffer_get_base,
    /* .init_tensor     = */ ggml_backend_amx_buffer_init_tensor,
    /* .memset_tensor   = */ ggml_backend_amx_buffer_memset_tensor,
    /* .set_tensor      = */ ggml_backend_amx_buffer_set_tensor,
    /* .get_tensor      = */ nullptr,
    /* .cpy_tensor      = */ nullptr,
    /* .clear           = */ ggml_backend_amx_buffer_clear,
    /* .reset           = */ nullptr,
};
|
|
117
|
+
|
|
118
|
+
// Human-readable name of this buffer type.
static const char * ggml_backend_amx_buffer_type_get_name(ggml_backend_buffer_type_t buft) {
    return "AMX";

    GGML_UNUSED(buft);
}
|
|
123
|
+
|
|
124
|
+
// Allocate a new AMX buffer of `size` bytes using ggml's aligned allocator.
// Returns NULL on allocation failure.
static ggml_backend_buffer_t ggml_backend_amx_buffer_type_alloc_buffer(ggml_backend_buffer_type_t buft, size_t size) {
    void * data = ggml_aligned_malloc(size);
    if (data == NULL) {
        fprintf(stderr, "%s: failed to allocate buffer of size %zu\n", __func__, size);
        return NULL;
    }

    return ggml_backend_buffer_init(buft, ggml_backend_amx_buffer_interface, data, size);
}
|
|
133
|
+
|
|
134
|
+
// Required address alignment for tensors stored in AMX buffers.
static size_t ggml_backend_amx_buffer_type_get_alignment(ggml_backend_buffer_type_t buft) {
    return TENSOR_ALIGNMENT;

    GGML_UNUSED(buft);
}
|
|
139
|
+
|
|
140
|
+
namespace ggml::cpu::amx {
// Extra-buffer-type hook: advertises which ops the AMX path can execute and
// hands out the traits object for tensors living in AMX buffers.
class extra_buffer_type : ggml::cpu::extra_buffer_type {
    bool supports_op(ggml_backend_dev_t, const struct ggml_tensor * op) override {
        // handle only 2d gemm for now
        auto is_contiguous_2d = [](const struct ggml_tensor * t) {
            return ggml_is_contiguous(t) && t->ne[3] == 1 && t->ne[2] == 1;
        };

        if (op->op == GGML_OP_MUL_MAT && is_contiguous_2d(op->src[0]) &&  // src0 must be contiguous
            is_contiguous_2d(op->src[1]) &&                               // src1 must be contiguous
            op->src[0]->buffer && op->src[0]->buffer->buft == ggml_backend_amx_buffer_type() &&
            op->ne[0] % (TILE_N * 2) == 0 &&                              // out_features is 32x
            (qtype_has_amx_kernels(op->src[0]->type) || (op->src[0]->type == GGML_TYPE_F16))) {
            // src1 must be host buffer
            if (op->src[1]->buffer && !ggml_backend_buft_is_host(op->src[1]->buffer->buft)) {
                return false;
            }
            // src1 must be float32
            if (op->src[1]->type == GGML_TYPE_F32) {
                return true;
            }
        }
        return false;
    }

    // For a supported matmul, return the traits object that was attached to
    // the AMX-resident weight tensor in ggml_backend_amx_buffer_init_tensor.
    ggml::cpu::tensor_traits * get_tensor_traits(const struct ggml_tensor * op) override {
        if (op->op == GGML_OP_MUL_MAT && op->src[0]->buffer &&
            op->src[0]->buffer->buft == ggml_backend_amx_buffer_type()) {
            return (ggml::cpu::tensor_traits *) op->src[0]->extra;
        }

        return nullptr;
    }
};
}  // namespace ggml::cpu::amx
|
|
175
|
+
|
|
176
|
+
// Bytes actually needed to store `tensor` in this buffer; may differ from
// ggml_nbytes() for repacked types (delegates to ggml_backend_amx_get_alloc_size).
static size_t ggml_backend_amx_buffer_type_get_alloc_size(ggml_backend_buffer_type_t buft, const ggml_tensor * tensor) {
    return ggml_backend_amx_get_alloc_size(tensor);

    GGML_UNUSED(buft);
}
|
|
181
|
+
|
|
182
|
+
#define ARCH_GET_XCOMP_PERM 0x1022
|
|
183
|
+
#define ARCH_REQ_XCOMP_PERM 0x1023
|
|
184
|
+
#define XFEATURE_XTILECFG 17
|
|
185
|
+
#define XFEATURE_XTILEDATA 18
|
|
186
|
+
|
|
187
|
+
static bool ggml_amx_init() {
|
|
188
|
+
#if defined(__gnu_linux__)
|
|
189
|
+
if (syscall(SYS_arch_prctl, ARCH_REQ_XCOMP_PERM, XFEATURE_XTILEDATA)) {
|
|
190
|
+
fprintf(stderr, "AMX is not ready to be used!\n");
|
|
191
|
+
return false;
|
|
192
|
+
}
|
|
193
|
+
return true;
|
|
194
|
+
#elif defined(_WIN32)
|
|
195
|
+
return true;
|
|
196
|
+
#endif
|
|
197
|
+
}
|
|
198
|
+
|
|
199
|
+
// Return the process-wide AMX buffer type, or nullptr when AMX permission
// cannot be obtained from the OS (see ggml_amx_init).
ggml_backend_buffer_type_t ggml_backend_amx_buffer_type() {
    static struct ggml_backend_buffer_type ggml_backend_buffer_type_amx = {
        /* .iface = */ {
            /* .get_name         = */ ggml_backend_amx_buffer_type_get_name,
            /* .alloc_buffer     = */ ggml_backend_amx_buffer_type_alloc_buffer,
            /* .get_alignment    = */ ggml_backend_amx_buffer_type_get_alignment,
            /* .get_max_size     = */ nullptr, // defaults to SIZE_MAX
            /* .get_alloc_size   = */ ggml_backend_amx_buffer_type_get_alloc_size,
            /* .is_host          = */ nullptr,
            },
        /* .device  = */ ggml_backend_reg_dev_get(ggml_backend_cpu_reg(), 0),
        /* .context = */ new ggml::cpu::amx::extra_buffer_type(),
    };

    // NOTE(review): ggml_amx_init() is re-run on every call; the Linux
    // permission request appears idempotent, so this is redundant but
    // presumably harmless — confirm upstream intent.
    if (!ggml_amx_init()) {
        return nullptr;
    }

    return &ggml_backend_buffer_type_amx;
}
|
|
219
|
+
|
|
220
|
+
#endif // defined(__AMX_INT8__) && defined(__AVX512VNNI__)
|
|
@@ -0,0 +1,91 @@
|
|
|
1
|
+
#pragma once
|
|
2
|
+
|
|
3
|
+
#include "ggml.h"
|
|
4
|
+
#include "ggml-cpu-impl.h"
|
|
5
|
+
|
|
6
|
+
#include <algorithm>
|
|
7
|
+
#include <memory>
|
|
8
|
+
#include <type_traits>
|
|
9
|
+
|
|
10
|
+
#if defined(GGML_USE_OPENMP)
|
|
11
|
+
#include <omp.h>
|
|
12
|
+
#endif
|
|
13
|
+
|
|
14
|
+
#define TILE_M 16
|
|
15
|
+
#define TILE_N 16
|
|
16
|
+
#define TILE_K 32
|
|
17
|
+
#define VNNI_BLK 4
|
|
18
|
+
|
|
19
|
+
#define AMX_BLK_SIZE 32
|
|
20
|
+
|
|
21
|
+
#define TMM0 0
|
|
22
|
+
#define TMM1 1
|
|
23
|
+
#define TMM2 2
|
|
24
|
+
#define TMM3 3
|
|
25
|
+
#define TMM4 4
|
|
26
|
+
#define TMM5 5
|
|
27
|
+
#define TMM6 6
|
|
28
|
+
#define TMM7 7
|
|
29
|
+
|
|
30
|
+
// parallel routines
|
|
31
|
+
// Integer ceiling division: smallest q with q * y >= x (for positive inputs).
// Restricted to integral T via SFINAE.
template <typename T, typename std::enable_if<std::is_integral<T>::value, int>::type = 0>
inline T div_up(T x, T y) {
    const T rounded = x + y - 1;
    return rounded / y;
}
|
|
33
|
+
|
|
34
|
+
// Split n units of work across nth workers and return worker ith's half-open
// range [n_start, n_end). The active scheme is the PyTorch ATen pattern:
// equal chunks of ceil(n/nth), so trailing workers may receive an empty
// range. The onednn scheme is kept under #if 0 for reference.
template <typename T>
inline void balance211(T n, T nth, T ith, T& n_start, T& n_end) {
#if 0
    // onednn partition pattern
    T& n_my = n_end;
    if (nth <= 1 || n == 0) {
        n_start = 0;
        n_my = n;
    } else {
        T n1 = div_up(n, nth);
        T n2 = n1 - 1;
        T T1 = n - n2 * nth;
        n_my = ith < T1 ? n1 : n2;
        n_start = ith <= T1 ? ith*n1 : T1 * n1 + (ith - T1) * n2;
    }
    n_end += n_start;
#else
    // pytorch aten partition pattern
    T n_my = div_up(n, nth);
    n_start = ith * n_my;
    n_end = std::min(n_start + n_my, n);
#endif
}
|
|
57
|
+
|
|
58
|
+
// Invoke f(begin, end) over the range [0, n). With OpenMP enabled the range
// is partitioned across the parallel team via balance211; otherwise the
// whole range is processed inline on the calling thread.
template <typename func_t>
inline void parallel_for(int n, const func_t& f) {
#if defined(GGML_USE_OPENMP)
#pragma omp parallel
    {
        const int num_threads = omp_get_num_threads();
        const int thread_id   = omp_get_thread_num();
        int begin = 0;
        int end   = 0;
        balance211(n, num_threads, thread_id, begin, end);
        f(begin, end);
    }
#else
    f(0, n);
#endif
}
|
|
73
|
+
|
|
74
|
+
// ggml-threadpool flavour of parallel_for: the caller is already running on
// a ggml compute thread, so just carve out this thread's share of [0, n)
// from params->ith / params->nth and process it with f(begin, end).
template <typename func_t>
inline void parallel_for_ggml(const ggml_compute_params * params, int n, const func_t & f) {
    int tbegin, tend;
    balance211(n, params->nth, params->ith, tbegin, tend);
    f(tbegin, tend);
}
|
|
80
|
+
|
|
81
|
+
// quantized types that have AMX support
|
|
82
|
+
inline bool qtype_has_amx_kernels(const enum ggml_type type) {
|
|
83
|
+
// TODO: fix padding for vnni format
|
|
84
|
+
return (type == GGML_TYPE_Q4_0) ||
|
|
85
|
+
(type == GGML_TYPE_Q4_1) ||
|
|
86
|
+
(type == GGML_TYPE_Q8_0) ||
|
|
87
|
+
(type == GGML_TYPE_Q4_K) ||
|
|
88
|
+
(type == GGML_TYPE_Q5_K) ||
|
|
89
|
+
(type == GGML_TYPE_Q6_K) ||
|
|
90
|
+
(type == GGML_TYPE_IQ4_XS);
|
|
91
|
+
}
|