@fugood/llama.node 0.3.2 → 0.3.4
This diff represents the content of publicly available package versions that have been released to one of the supported registries. It is provided for informational purposes only and reflects the changes between the package versions as they appear in their public registries.
- package/CMakeLists.txt +7 -0
- package/bin/darwin/arm64/llama-node.node +0 -0
- package/bin/darwin/x64/llama-node.node +0 -0
- package/bin/linux/arm64/llama-node.node +0 -0
- package/bin/linux/x64/llama-node.node +0 -0
- package/bin/linux-vulkan/arm64/llama-node.node +0 -0
- package/bin/linux-vulkan/x64/llama-node.node +0 -0
- package/bin/win32/arm64/llama-node.node +0 -0
- package/bin/win32/arm64/node.lib +0 -0
- package/bin/win32/x64/llama-node.node +0 -0
- package/bin/win32/x64/node.lib +0 -0
- package/bin/win32-vulkan/arm64/llama-node.node +0 -0
- package/bin/win32-vulkan/arm64/node.lib +0 -0
- package/bin/win32-vulkan/x64/llama-node.node +0 -0
- package/bin/win32-vulkan/x64/node.lib +0 -0
- package/lib/binding.ts +18 -1
- package/package.json +1 -1
- package/src/DetokenizeWorker.cpp +1 -1
- package/src/EmbeddingWorker.cpp +17 -7
- package/src/EmbeddingWorker.h +2 -1
- package/src/LlamaCompletionWorker.cpp +8 -8
- package/src/LlamaCompletionWorker.h +2 -2
- package/src/LlamaContext.cpp +89 -27
- package/src/LlamaContext.h +2 -0
- package/src/TokenizeWorker.cpp +1 -1
- package/src/common.hpp +4 -4
- package/src/llama.cpp/.github/workflows/build.yml +240 -168
- package/src/llama.cpp/.github/workflows/docker.yml +8 -8
- package/src/llama.cpp/.github/workflows/python-lint.yml +8 -1
- package/src/llama.cpp/.github/workflows/server.yml +21 -14
- package/src/llama.cpp/CMakeLists.txt +14 -6
- package/src/llama.cpp/Sources/llama/llama.h +4 -0
- package/src/llama.cpp/cmake/arm64-apple-clang.cmake +16 -0
- package/src/llama.cpp/cmake/common.cmake +33 -0
- package/src/llama.cpp/cmake/x64-windows-llvm.cmake +11 -0
- package/src/llama.cpp/common/CMakeLists.txt +6 -4
- package/src/llama.cpp/common/arg.cpp +986 -770
- package/src/llama.cpp/common/arg.h +22 -22
- package/src/llama.cpp/common/common.cpp +212 -351
- package/src/llama.cpp/common/common.h +204 -117
- package/src/llama.cpp/common/json-schema-to-grammar.cpp +1 -1
- package/src/llama.cpp/common/log.cpp +50 -50
- package/src/llama.cpp/common/log.h +18 -18
- package/src/llama.cpp/common/ngram-cache.cpp +36 -36
- package/src/llama.cpp/common/ngram-cache.h +19 -19
- package/src/llama.cpp/common/sampling.cpp +163 -121
- package/src/llama.cpp/common/sampling.h +41 -20
- package/src/llama.cpp/common/speculative.cpp +274 -0
- package/src/llama.cpp/common/speculative.h +28 -0
- package/src/llama.cpp/docs/build.md +134 -161
- package/src/llama.cpp/examples/CMakeLists.txt +33 -14
- package/src/llama.cpp/examples/batched/CMakeLists.txt +1 -1
- package/src/llama.cpp/examples/batched/batched.cpp +19 -18
- package/src/llama.cpp/examples/batched-bench/CMakeLists.txt +1 -1
- package/src/llama.cpp/examples/batched-bench/batched-bench.cpp +10 -11
- package/src/llama.cpp/examples/convert-llama2c-to-ggml/CMakeLists.txt +1 -1
- package/src/llama.cpp/examples/convert-llama2c-to-ggml/convert-llama2c-to-ggml.cpp +1 -1
- package/src/llama.cpp/examples/cvector-generator/CMakeLists.txt +1 -1
- package/src/llama.cpp/examples/cvector-generator/cvector-generator.cpp +9 -9
- package/src/llama.cpp/examples/deprecation-warning/deprecation-warning.cpp +1 -1
- package/src/llama.cpp/examples/embedding/CMakeLists.txt +1 -1
- package/src/llama.cpp/examples/embedding/embedding.cpp +12 -12
- package/src/llama.cpp/examples/eval-callback/CMakeLists.txt +3 -2
- package/src/llama.cpp/examples/eval-callback/eval-callback.cpp +8 -8
- package/src/llama.cpp/examples/export-lora/CMakeLists.txt +1 -1
- package/src/llama.cpp/examples/export-lora/export-lora.cpp +5 -5
- package/src/llama.cpp/examples/gbnf-validator/CMakeLists.txt +1 -1
- package/src/llama.cpp/examples/gbnf-validator/gbnf-validator.cpp +4 -7
- package/src/llama.cpp/examples/gen-docs/CMakeLists.txt +1 -1
- package/src/llama.cpp/examples/gen-docs/gen-docs.cpp +7 -7
- package/src/llama.cpp/examples/gguf/CMakeLists.txt +1 -1
- package/src/llama.cpp/examples/gguf-hash/CMakeLists.txt +8 -1
- package/src/llama.cpp/examples/gguf-split/CMakeLists.txt +1 -1
- package/src/llama.cpp/examples/gguf-split/gguf-split.cpp +2 -2
- package/src/llama.cpp/examples/gritlm/CMakeLists.txt +1 -1
- package/src/llama.cpp/examples/gritlm/gritlm.cpp +18 -18
- package/src/llama.cpp/examples/imatrix/CMakeLists.txt +1 -1
- package/src/llama.cpp/examples/imatrix/imatrix.cpp +31 -13
- package/src/llama.cpp/examples/infill/CMakeLists.txt +1 -1
- package/src/llama.cpp/examples/infill/infill.cpp +41 -87
- package/src/llama.cpp/examples/llama-bench/CMakeLists.txt +1 -1
- package/src/llama.cpp/examples/llama-bench/llama-bench.cpp +439 -459
- package/src/llama.cpp/examples/llama.android/llama/build.gradle.kts +2 -0
- package/src/llama.cpp/examples/llama.android/llama/src/main/cpp/llama-android.cpp +11 -14
- package/src/llama.cpp/examples/llava/CMakeLists.txt +10 -3
- package/src/llama.cpp/examples/llava/clip.cpp +263 -66
- package/src/llama.cpp/examples/llava/clip.h +8 -2
- package/src/llama.cpp/examples/llava/llava-cli.cpp +23 -23
- package/src/llama.cpp/examples/llava/llava.cpp +83 -22
- package/src/llama.cpp/examples/llava/minicpmv-cli.cpp +21 -21
- package/src/llama.cpp/examples/llava/qwen2vl-cli.cpp +581 -0
- package/src/llama.cpp/examples/lookahead/CMakeLists.txt +1 -1
- package/src/llama.cpp/examples/lookahead/lookahead.cpp +26 -26
- package/src/llama.cpp/examples/lookup/CMakeLists.txt +4 -4
- package/src/llama.cpp/examples/lookup/lookup-create.cpp +7 -7
- package/src/llama.cpp/examples/lookup/lookup-merge.cpp +4 -4
- package/src/llama.cpp/examples/lookup/lookup-stats.cpp +16 -15
- package/src/llama.cpp/examples/lookup/lookup.cpp +30 -30
- package/src/llama.cpp/examples/main/CMakeLists.txt +1 -1
- package/src/llama.cpp/examples/main/main.cpp +73 -114
- package/src/llama.cpp/examples/main-cmake-pkg/CMakeLists.txt +1 -1
- package/src/llama.cpp/examples/parallel/CMakeLists.txt +1 -1
- package/src/llama.cpp/examples/parallel/parallel.cpp +18 -19
- package/src/llama.cpp/examples/passkey/CMakeLists.txt +1 -1
- package/src/llama.cpp/examples/passkey/passkey.cpp +14 -14
- package/src/llama.cpp/examples/perplexity/CMakeLists.txt +1 -1
- package/src/llama.cpp/examples/perplexity/perplexity.cpp +99 -120
- package/src/llama.cpp/examples/quantize/CMakeLists.txt +1 -1
- package/src/llama.cpp/examples/quantize/quantize.cpp +0 -3
- package/src/llama.cpp/examples/quantize-stats/CMakeLists.txt +1 -1
- package/src/llama.cpp/examples/quantize-stats/quantize-stats.cpp +10 -9
- package/src/llama.cpp/examples/retrieval/CMakeLists.txt +1 -1
- package/src/llama.cpp/examples/retrieval/retrieval.cpp +16 -16
- package/src/llama.cpp/examples/rpc/rpc-server.cpp +3 -1
- package/src/llama.cpp/examples/run/CMakeLists.txt +5 -0
- package/src/llama.cpp/examples/run/run.cpp +911 -0
- package/src/llama.cpp/examples/save-load-state/CMakeLists.txt +1 -1
- package/src/llama.cpp/examples/save-load-state/save-load-state.cpp +38 -21
- package/src/llama.cpp/examples/server/CMakeLists.txt +3 -16
- package/src/llama.cpp/examples/server/server.cpp +2073 -1339
- package/src/llama.cpp/examples/server/tests/requirements.txt +2 -2
- package/src/llama.cpp/examples/server/utils.hpp +354 -277
- package/src/llama.cpp/examples/simple/CMakeLists.txt +2 -2
- package/src/llama.cpp/examples/simple/simple.cpp +130 -94
- package/src/llama.cpp/examples/simple-chat/CMakeLists.txt +5 -0
- package/src/llama.cpp/examples/simple-chat/simple-chat.cpp +200 -0
- package/src/llama.cpp/examples/speculative/CMakeLists.txt +1 -1
- package/src/llama.cpp/examples/speculative/speculative.cpp +68 -64
- package/src/llama.cpp/examples/speculative-simple/CMakeLists.txt +5 -0
- package/src/llama.cpp/examples/speculative-simple/speculative-simple.cpp +265 -0
- package/src/llama.cpp/examples/tokenize/CMakeLists.txt +1 -1
- package/src/llama.cpp/examples/tokenize/tokenize.cpp +3 -3
- package/src/llama.cpp/examples/tts/CMakeLists.txt +5 -0
- package/src/llama.cpp/examples/tts/tts.cpp +932 -0
- package/src/llama.cpp/ggml/CMakeLists.txt +54 -36
- package/src/llama.cpp/ggml/include/ggml-backend.h +63 -34
- package/src/llama.cpp/ggml/include/ggml-blas.h +5 -3
- package/src/llama.cpp/ggml/include/ggml-cann.h +9 -7
- package/src/llama.cpp/ggml/include/ggml-cpp.h +38 -0
- package/src/llama.cpp/ggml/include/ggml-cpu.h +135 -0
- package/src/llama.cpp/ggml/include/ggml-cuda.h +12 -12
- package/src/llama.cpp/ggml/include/ggml-kompute.h +7 -3
- package/src/llama.cpp/ggml/include/ggml-metal.h +11 -7
- package/src/llama.cpp/ggml/include/ggml-opencl.h +26 -0
- package/src/llama.cpp/ggml/include/ggml-opt.h +216 -0
- package/src/llama.cpp/ggml/include/ggml-rpc.h +9 -5
- package/src/llama.cpp/ggml/include/ggml-sycl.h +18 -11
- package/src/llama.cpp/ggml/include/ggml-vulkan.h +10 -8
- package/src/llama.cpp/ggml/include/ggml.h +159 -417
- package/src/llama.cpp/ggml/src/CMakeLists.txt +121 -1155
- package/src/llama.cpp/ggml/src/ggml-alloc.c +23 -28
- package/src/llama.cpp/ggml/src/ggml-backend-impl.h +57 -36
- package/src/llama.cpp/ggml/src/ggml-backend-reg.cpp +552 -0
- package/src/llama.cpp/ggml/src/ggml-backend.cpp +306 -867
- package/src/llama.cpp/ggml/src/ggml-blas/CMakeLists.txt +87 -0
- package/src/llama.cpp/ggml/src/{ggml-blas.cpp → ggml-blas/ggml-blas.cpp} +216 -65
- package/src/llama.cpp/ggml/src/ggml-cann/CMakeLists.txt +76 -0
- package/src/llama.cpp/ggml/src/ggml-cann/aclnn_ops.cpp +456 -111
- package/src/llama.cpp/ggml/src/ggml-cann/common.h +6 -3
- package/src/llama.cpp/ggml/src/{ggml-cann.cpp → ggml-cann/ggml-cann.cpp} +343 -177
- package/src/llama.cpp/ggml/src/ggml-cann/kernels/CMakeLists.txt +2 -5
- package/src/llama.cpp/ggml/src/ggml-cann/kernels/dup.cpp +22 -9
- package/src/llama.cpp/ggml/src/ggml-cann/kernels/get_row_f16.cpp +24 -13
- package/src/llama.cpp/ggml/src/ggml-cann/kernels/get_row_f32.cpp +23 -13
- package/src/llama.cpp/ggml/src/ggml-cann/kernels/get_row_q4_0.cpp +11 -0
- package/src/llama.cpp/ggml/src/ggml-cann/kernels/quantize_f16_q8_0.cpp +10 -0
- package/src/llama.cpp/ggml/src/ggml-cann/kernels/quantize_f32_q8_0.cpp +10 -0
- package/src/llama.cpp/ggml/src/ggml-cann/kernels/quantize_float_to_q4_0.cpp +17 -0
- package/src/llama.cpp/ggml/src/ggml-common.h +42 -42
- package/src/llama.cpp/ggml/src/ggml-cpu/CMakeLists.txt +336 -0
- package/src/llama.cpp/ggml/src/ggml-cpu/amx/amx.cpp +220 -0
- package/src/llama.cpp/ggml/src/ggml-cpu/amx/amx.h +8 -0
- package/src/llama.cpp/ggml/src/ggml-cpu/amx/common.h +91 -0
- package/src/llama.cpp/ggml/src/ggml-cpu/amx/mmq.cpp +2511 -0
- package/src/llama.cpp/ggml/src/ggml-cpu/amx/mmq.h +10 -0
- package/src/llama.cpp/ggml/src/ggml-cpu/cpu-feats-x86.cpp +323 -0
- package/src/llama.cpp/ggml/src/{ggml-aarch64.c → ggml-cpu/ggml-cpu-aarch64.cpp} +1299 -246
- package/src/llama.cpp/ggml/src/ggml-cpu/ggml-cpu-aarch64.h +8 -0
- package/src/llama.cpp/ggml/src/ggml-cpu/ggml-cpu-hbm.cpp +55 -0
- package/src/llama.cpp/ggml/src/ggml-cpu/ggml-cpu-hbm.h +8 -0
- package/src/llama.cpp/ggml/src/{ggml-cpu-impl.h → ggml-cpu/ggml-cpu-impl.h} +14 -242
- package/src/llama.cpp/ggml/src/ggml-cpu/ggml-cpu-quants.c +10835 -0
- package/src/llama.cpp/ggml/src/ggml-cpu/ggml-cpu-quants.h +63 -0
- package/src/llama.cpp/ggml/src/ggml-cpu/ggml-cpu-traits.cpp +36 -0
- package/src/llama.cpp/ggml/src/ggml-cpu/ggml-cpu-traits.h +38 -0
- package/src/llama.cpp/ggml/src/ggml-cpu/ggml-cpu.c +14123 -0
- package/src/llama.cpp/ggml/src/ggml-cpu/ggml-cpu.cpp +628 -0
- package/src/llama.cpp/ggml/src/{llamafile → ggml-cpu/llamafile}/sgemm.cpp +666 -0
- package/src/llama.cpp/ggml/src/ggml-cuda/CMakeLists.txt +152 -0
- package/src/llama.cpp/ggml/src/ggml-cuda/vendors/hip.h +8 -0
- package/src/llama.cpp/ggml/src/ggml-hip/CMakeLists.txt +104 -0
- package/src/llama.cpp/ggml/src/ggml-impl.h +393 -22
- package/src/llama.cpp/ggml/src/ggml-kompute/CMakeLists.txt +166 -0
- package/src/llama.cpp/ggml/src/{ggml-kompute.cpp → ggml-kompute/ggml-kompute.cpp} +360 -127
- package/src/llama.cpp/ggml/src/ggml-metal/CMakeLists.txt +105 -0
- package/src/llama.cpp/ggml/src/ggml-metal/ggml-metal-impl.h +288 -0
- package/src/llama.cpp/ggml/src/ggml-musa/CMakeLists.txt +107 -0
- package/src/llama.cpp/ggml/src/ggml-opencl/CMakeLists.txt +147 -0
- package/src/llama.cpp/ggml/src/ggml-opencl/ggml-opencl.cpp +4004 -0
- package/src/llama.cpp/ggml/src/ggml-opt.cpp +854 -0
- package/src/llama.cpp/ggml/src/ggml-quants.c +188 -10702
- package/src/llama.cpp/ggml/src/ggml-quants.h +78 -125
- package/src/llama.cpp/ggml/src/ggml-rpc/CMakeLists.txt +9 -0
- package/src/llama.cpp/ggml/src/{ggml-rpc.cpp → ggml-rpc/ggml-rpc.cpp} +478 -300
- package/src/llama.cpp/ggml/src/ggml-sycl/CMakeLists.txt +84 -0
- package/src/llama.cpp/ggml/src/ggml-sycl/backend.hpp +3 -0
- package/src/llama.cpp/ggml/src/ggml-sycl/common.cpp +36 -5
- package/src/llama.cpp/ggml/src/ggml-sycl/common.hpp +259 -0
- package/src/llama.cpp/ggml/src/ggml-sycl/concat.cpp +3 -2
- package/src/llama.cpp/ggml/src/ggml-sycl/convert.cpp +1 -1
- package/src/llama.cpp/ggml/src/ggml-sycl/dmmv.cpp +5 -5
- package/src/llama.cpp/ggml/src/ggml-sycl/dpct/helper.hpp +34 -35
- package/src/llama.cpp/ggml/src/ggml-sycl/element_wise.cpp +1030 -0
- package/src/llama.cpp/ggml/src/ggml-sycl/element_wise.hpp +76 -0
- package/src/llama.cpp/ggml/src/ggml-sycl/gemm.hpp +4 -4
- package/src/llama.cpp/ggml/src/{ggml-sycl.cpp → ggml-sycl/ggml-sycl.cpp} +3638 -4151
- package/src/llama.cpp/ggml/src/ggml-sycl/im2col.cpp +3 -2
- package/src/llama.cpp/ggml/src/ggml-sycl/mmq.cpp +6 -6
- package/src/llama.cpp/ggml/src/ggml-sycl/mmvq.cpp +75 -87
- package/src/llama.cpp/ggml/src/ggml-sycl/norm.cpp +7 -6
- package/src/llama.cpp/ggml/src/ggml-sycl/outprod.cpp +56 -0
- package/src/llama.cpp/ggml/src/ggml-sycl/outprod.hpp +11 -0
- package/src/llama.cpp/ggml/src/ggml-sycl/presets.hpp +6 -0
- package/src/llama.cpp/ggml/src/ggml-sycl/rope.cpp +4 -3
- package/src/llama.cpp/ggml/src/ggml-sycl/softmax.cpp +7 -7
- package/src/llama.cpp/ggml/src/ggml-sycl/tsembd.cpp +1 -0
- package/src/llama.cpp/ggml/src/ggml-sycl/vecdotq.hpp +4 -4
- package/src/llama.cpp/ggml/src/ggml-sycl/wkv6.cpp +141 -0
- package/src/llama.cpp/ggml/src/ggml-sycl/wkv6.hpp +10 -0
- package/src/llama.cpp/ggml/src/ggml-threading.cpp +12 -0
- package/src/llama.cpp/ggml/src/ggml-threading.h +14 -0
- package/src/llama.cpp/ggml/src/ggml-vulkan/CMakeLists.txt +92 -0
- package/src/llama.cpp/ggml/src/{ggml-vulkan.cpp → ggml-vulkan/ggml-vulkan.cpp} +2138 -887
- package/src/llama.cpp/ggml/src/{vulkan-shaders → ggml-vulkan/vulkan-shaders}/CMakeLists.txt +3 -1
- package/src/llama.cpp/ggml/src/ggml-vulkan/vulkan-shaders/vulkan-shaders-gen.cpp +593 -0
- package/src/llama.cpp/ggml/src/ggml.c +4427 -20125
- package/src/llama.cpp/include/llama-cpp.h +25 -0
- package/src/llama.cpp/include/llama.h +93 -52
- package/src/llama.cpp/models/ggml-vocab-roberta-bpe.gguf.inp +112 -0
- package/src/llama.cpp/models/ggml-vocab-roberta-bpe.gguf.out +46 -0
- package/src/llama.cpp/pocs/CMakeLists.txt +3 -1
- package/src/llama.cpp/pocs/vdot/CMakeLists.txt +2 -2
- package/src/llama.cpp/pocs/vdot/q8dot.cpp +4 -3
- package/src/llama.cpp/pocs/vdot/vdot.cpp +8 -7
- package/src/llama.cpp/src/CMakeLists.txt +4 -8
- package/src/llama.cpp/src/llama-grammar.cpp +15 -15
- package/src/llama.cpp/src/llama-grammar.h +2 -5
- package/src/llama.cpp/src/llama-sampling.cpp +779 -194
- package/src/llama.cpp/src/llama-sampling.h +21 -2
- package/src/llama.cpp/src/llama-vocab.cpp +55 -10
- package/src/llama.cpp/src/llama-vocab.h +35 -11
- package/src/llama.cpp/src/llama.cpp +4317 -2979
- package/src/llama.cpp/src/unicode-data.cpp +2 -2
- package/src/llama.cpp/src/unicode.cpp +62 -51
- package/src/llama.cpp/src/unicode.h +9 -10
- package/src/llama.cpp/tests/CMakeLists.txt +48 -38
- package/src/llama.cpp/tests/test-arg-parser.cpp +15 -15
- package/src/llama.cpp/tests/test-backend-ops.cpp +324 -80
- package/src/llama.cpp/tests/test-barrier.cpp +1 -0
- package/src/llama.cpp/tests/test-chat-template.cpp +59 -9
- package/src/llama.cpp/tests/test-gguf.cpp +1303 -0
- package/src/llama.cpp/tests/test-grammar-integration.cpp +3 -6
- package/src/llama.cpp/tests/test-json-schema-to-grammar.cpp +17 -4
- package/src/llama.cpp/tests/test-llama-grammar.cpp +2 -4
- package/src/llama.cpp/tests/test-log.cpp +2 -2
- package/src/llama.cpp/tests/test-opt.cpp +853 -142
- package/src/llama.cpp/tests/test-quantize-fns.cpp +24 -21
- package/src/llama.cpp/tests/test-quantize-perf.cpp +16 -14
- package/src/llama.cpp/tests/test-rope.cpp +62 -20
- package/src/llama.cpp/tests/test-sampling.cpp +163 -138
- package/src/llama.cpp/tests/test-tokenizer-0.cpp +7 -7
- package/src/llama.cpp/tests/test-tokenizer-1-bpe.cpp +5 -5
- package/src/llama.cpp/tests/test-tokenizer-1-spm.cpp +5 -5
- package/src/llama.cpp/.github/workflows/nix-ci-aarch64.yml +0 -72
- package/src/llama.cpp/.github/workflows/nix-ci.yml +0 -79
- package/src/llama.cpp/.github/workflows/nix-flake-update.yml +0 -22
- package/src/llama.cpp/.github/workflows/nix-publish-flake.yml +0 -36
- package/src/llama.cpp/common/train.cpp +0 -1515
- package/src/llama.cpp/common/train.h +0 -233
- package/src/llama.cpp/examples/baby-llama/CMakeLists.txt +0 -5
- package/src/llama.cpp/examples/baby-llama/baby-llama.cpp +0 -1639
- package/src/llama.cpp/ggml/src/ggml-aarch64.h +0 -39
- package/src/llama.cpp/ggml/src/vulkan-shaders/vulkan-shaders-gen.cpp +0 -600
- package/src/llama.cpp/tests/test-grad0.cpp +0 -1683
- /package/src/llama.cpp/ggml/{cmake → src/ggml-cpu/cmake}/FindSIMD.cmake +0 -0
- /package/src/llama.cpp/ggml/src/{llamafile → ggml-cpu/llamafile}/sgemm.h +0 -0
package/src/llama.cpp/ggml/src/ggml-impl.h

@@ -3,43 +3,79 @@
 // GGML internal header
 
 #include "ggml.h"
-
 #include <assert.h>
+#include <math.h>
 #include <stdlib.h> // load `stdlib.h` before other headers to work around MinGW bug: https://sourceforge.net/p/mingw-w64/bugs/192/
 #include <stdbool.h>
 #include <stdint.h>
+#include <string.h>
+
+#ifdef __ARM_FEATURE_SVE
+#include <arm_sve.h>
+#endif // __ARM_FEATURE_SVE
+
+#if defined(__ARM_NEON) && !defined(__CUDACC__)
+// if YCM cannot find <arm_neon.h>, make a symbolic link to it, for example:
+//
+//   $ ln -sfn /Library/Developer/CommandLineTools/usr/lib/clang/13.1.6/include/arm_neon.h ./src/
+//
+#include <arm_neon.h>
+#endif
+
+#if defined(__F16C__)
+#include <immintrin.h>
+#endif
 
 #ifdef __cplusplus
 extern "C" {
 #endif
 
-#undef MIN
-#undef MAX
+#ifndef MIN
+#    define MIN(a, b) ((a) < (b) ? (a) : (b))
+#endif
+
+#ifndef MAX
+#    define MAX(a, b) ((a) > (b) ? (a) : (b))
+#endif
 
-#define MIN(a, b) ((a) < (b) ? (a) : (b))
-#define MAX(a, b) ((a) > (b) ? (a) : (b))
+// required for mmap as gguf only guarantees 32-byte alignment
+#define TENSOR_ALIGNMENT 32
 
 // static_assert should be a #define, but if it's not,
 // fall back to the _Static_assert C11 keyword.
 // if C99 - static_assert is noop
 // ref: https://stackoverflow.com/a/53923785/4039976
 #ifndef __cplusplus
-#ifndef static_assert
-#if defined(__STDC_VERSION__) && (__STDC_VERSION__ >= 201100L)
-#define static_assert(cond, msg) _Static_assert(cond, msg)
-#else
-#define static_assert(cond, msg) struct global_scope_noop_trick
-#endif
-#endif
+#    ifndef static_assert
+#        if defined(__STDC_VERSION__) && (__STDC_VERSION__ >= 201100L)
+#            define static_assert(cond, msg) _Static_assert(cond, msg)
+#        else
+#            define static_assert(cond, msg) struct global_scope_noop_trick
+#        endif
+#    endif
 #endif
 
+static inline int ggml_up32(int n) {
+    return (n + 31) & ~31;
+}
+
+//static inline int ggml_up64(int n) {
+//    return (n + 63) & ~63;
+//}
+
+static inline int ggml_up(int n, int m) {
+    // assert m is a power of 2
+    GGML_ASSERT((m & (m - 1)) == 0);
+    return (n + m - 1) & ~(m - 1);
+}
+
 //
 // logging
 //
 
 GGML_ATTRIBUTE_FORMAT(2, 3)
-void ggml_log_internal        (enum ggml_log_level level, const char * format, ...);
-void ggml_log_callback_default(enum ggml_log_level level, const char * text, void * user_data);
+GGML_API void ggml_log_internal        (enum ggml_log_level level, const char * format, ...);
+GGML_API void ggml_log_callback_default(enum ggml_log_level level, const char * text, void * user_data);
 
 #define GGML_LOG(...)       ggml_log_internal(GGML_LOG_LEVEL_NONE , __VA_ARGS__)
 #define GGML_LOG_INFO(...)  ggml_log_internal(GGML_LOG_LEVEL_INFO , __VA_ARGS__)
@@ -48,6 +84,72 @@ void ggml_log_callback_default(enum ggml_log_level level, const char * text, void * user_data);
 #define GGML_LOG_DEBUG(...) ggml_log_internal(GGML_LOG_LEVEL_DEBUG, __VA_ARGS__)
 #define GGML_LOG_CONT(...)  ggml_log_internal(GGML_LOG_LEVEL_CONT , __VA_ARGS__)
 
+#define GGML_DEBUG 0
+
+#if (GGML_DEBUG >= 1)
+#define GGML_PRINT_DEBUG(...) GGML_LOG_DEBUG(__VA_ARGS__)
+#else
+#define GGML_PRINT_DEBUG(...)
+#endif
+
+#if (GGML_DEBUG >= 5)
+#define GGML_PRINT_DEBUG_5(...) GGML_LOG_DEBUG(__VA_ARGS__)
+#else
+#define GGML_PRINT_DEBUG_5(...)
+#endif
+
+#if (GGML_DEBUG >= 10)
+#define GGML_PRINT_DEBUG_10(...) GGML_LOG_DEBUG(__VA_ARGS__)
+#else
+#define GGML_PRINT_DEBUG_10(...)
+#endif
+
+// tensor params
+
+static void ggml_set_op_params(struct ggml_tensor * tensor, const void * params, size_t params_size) {
+    GGML_ASSERT(tensor != NULL); // silence -Warray-bounds warnings
+    assert(params_size <= GGML_MAX_OP_PARAMS);
+    memcpy(tensor->op_params, params, params_size);
+}
+
+static int32_t ggml_get_op_params_i32(const struct ggml_tensor * tensor, uint32_t i) {
+    assert(i < GGML_MAX_OP_PARAMS / sizeof(int32_t));
+    return ((const int32_t *)(tensor->op_params))[i];
+}
+
+static float ggml_get_op_params_f32(const struct ggml_tensor * tensor, uint32_t i) {
+    assert(i < GGML_MAX_OP_PARAMS / sizeof(float));
+    return ((const float *)(tensor->op_params))[i];
+}
+
+static void ggml_set_op_params_i32(struct ggml_tensor * tensor, uint32_t i, int32_t value) {
+    assert(i < GGML_MAX_OP_PARAMS / sizeof(int32_t));
+    ((int32_t *)(tensor->op_params))[i] = value;
+}
+
+static void ggml_set_op_params_f32(struct ggml_tensor * tensor, uint32_t i, float value) {
+    assert(i < GGML_MAX_OP_PARAMS / sizeof(float));
+    ((float *)(tensor->op_params))[i] = value;
+}
+
+struct ggml_map_custom1_op_params {
+    ggml_custom1_op_t fun;
+    int n_tasks;
+    void * userdata;
+};
+
+struct ggml_map_custom2_op_params {
+    ggml_custom2_op_t fun;
+    int n_tasks;
+    void * userdata;
+};
+
+struct ggml_map_custom3_op_params {
+    ggml_custom3_op_t fun;
+    int n_tasks;
+    void * userdata;
+};
+
 // bitset
 
 typedef uint32_t ggml_bitset_t;
@@ -96,7 +198,7 @@ void ggml_hash_set_reset(struct ggml_hash_set * hash_set);
 static bool ggml_hash_contains(const struct ggml_hash_set * hash_set, struct ggml_tensor * key);
 
 // returns GGML_HASHSET_FULL if table is full, otherwise the current index of the key or where it should be inserted
-static size_t ggml_hash_find(const struct ggml_hash_set * hash_set, struct ggml_tensor * key);
+static size_t ggml_hash_find(const struct ggml_hash_set * hash_set, const struct ggml_tensor * key);
 
 // returns GGML_HASHSET_ALREADY_EXISTS if key already exists, index otherwise, asserts if table is full
 static size_t ggml_hash_insert(struct ggml_hash_set * hash_set, struct ggml_tensor * key);
@@ -110,7 +212,7 @@ static inline size_t ggml_hash(const struct ggml_tensor * p) {
     return (size_t)(uintptr_t)p >> 4;
 }
 
-static size_t ggml_hash_find(const struct ggml_hash_set * hash_set, struct ggml_tensor * key) {
+static size_t ggml_hash_find(const struct ggml_hash_set * hash_set, const struct ggml_tensor * key) {
     size_t h = ggml_hash(key) % hash_set->size;
 
     // linear probing
@@ -181,21 +283,290 @@ enum ggml_cgraph_eval_order {
 };
 
 struct ggml_cgraph {
-    int size;
-    int n_nodes;
-    int n_leafs;
+    int size;    // maximum number of nodes/leafs/grads/grad_accs
+    int n_nodes; // number of nodes currently in use
+    int n_leafs; // number of leafs currently in use
 
-    struct ggml_tensor ** nodes;
-    struct ggml_tensor ** grads;
-    struct ggml_tensor ** leafs;
+    struct ggml_tensor ** nodes;     // tensors with data that can change if the graph is evaluated
+    struct ggml_tensor ** grads;     // the outputs of these tensors are the gradients of the nodes
+    struct ggml_tensor ** grad_accs; // accumulators for node gradients
+    struct ggml_tensor ** leafs;     // tensors with constant data
 
     struct ggml_hash_set visited_hash_set;
 
     enum ggml_cgraph_eval_order order;
 };
 
+// returns a slice of cgraph with nodes [i0, i1)
+// the slice does not have leafs or gradients
+// if you need the gradients, get them from the original graph
 struct ggml_cgraph ggml_graph_view(struct ggml_cgraph * cgraph, int i0, int i1);
 
+// Memory allocation
+
+GGML_API void * ggml_aligned_malloc(size_t size);
+GGML_API void ggml_aligned_free(void * ptr, size_t size);
+
+// FP16 to FP32 conversion
+
+#if defined(__ARM_NEON)
+#if defined(_MSC_VER) || (defined(__CUDACC__) && __CUDACC_VER_MAJOR__ <= 11)
+typedef uint16_t ggml_fp16_internal_t;
+#else
+typedef __fp16 ggml_fp16_internal_t;
+#endif
+#endif
+
+#if defined(__ARM_NEON) && !defined(_MSC_VER) && !(defined(__CUDACC__) && __CUDACC_VER_MAJOR__ <= 11)
+#define GGML_COMPUTE_FP16_TO_FP32(x) ggml_compute_fp16_to_fp32(x)
+#define GGML_COMPUTE_FP32_TO_FP16(x) ggml_compute_fp32_to_fp16(x)
+
+#define GGML_FP16_TO_FP32(x) ggml_compute_fp16_to_fp32(x)
+
+static inline float ggml_compute_fp16_to_fp32(ggml_fp16_t h) {
+    ggml_fp16_internal_t tmp;
+    memcpy(&tmp, &h, sizeof(ggml_fp16_t));
+    return (float)tmp;
+}
+
+static inline ggml_fp16_t ggml_compute_fp32_to_fp16(float f) {
+    ggml_fp16_t res;
+    ggml_fp16_internal_t tmp = f;
+    memcpy(&res, &tmp, sizeof(ggml_fp16_t));
+    return res;
+}
+
+#elif defined(__F16C__)
+
+#ifdef _MSC_VER
+#define GGML_COMPUTE_FP16_TO_FP32(x) _mm_cvtss_f32(_mm_cvtph_ps(_mm_cvtsi32_si128(x)))
+#define GGML_COMPUTE_FP32_TO_FP16(x) _mm_extract_epi16(_mm_cvtps_ph(_mm_set_ss(x), 0), 0)
+#else
+#define GGML_COMPUTE_FP16_TO_FP32(x) _cvtsh_ss(x)
+#define GGML_COMPUTE_FP32_TO_FP16(x) _cvtss_sh(x, 0)
+#endif
+
+#elif defined(__POWER9_VECTOR__)
+
+#define GGML_COMPUTE_FP16_TO_FP32(x) ggml_compute_fp16_to_fp32(x)
+#define GGML_COMPUTE_FP32_TO_FP16(x) ggml_compute_fp32_to_fp16(x)
+/* the inline asm below is about 12% faster than the lookup method */
+#define GGML_FP16_TO_FP32(x) GGML_COMPUTE_FP16_TO_FP32(x)
+#define GGML_FP32_TO_FP16(x) GGML_COMPUTE_FP32_TO_FP16(x)
+
+static inline float ggml_compute_fp16_to_fp32(ggml_fp16_t h) {
+    register float f;
+    register double d;
+    __asm__(
+        "mtfprd %0,%2\n"
+        "xscvhpdp %0,%0\n"
+        "frsp %1,%0\n" :
+        /* temp */ "=d"(d),
+        /* out */  "=f"(f):
+        /* in */   "r"(h));
+    return f;
+}
+
+static inline ggml_fp16_t ggml_compute_fp32_to_fp16(float f) {
+    register double d;
+    register ggml_fp16_t r;
+    __asm__( /* xscvdphp can work on double or single precision */
+        "xscvdphp %0,%2\n"
+        "mffprd %1,%0\n" :
+        /* temp */ "=d"(d),
+        /* out */  "=r"(r):
+        /* in */   "f"(f));
+    return r;
+}
+
+#else
+
+// FP16 <-> FP32
+// ref: https://github.com/Maratyszcza/FP16
+
+static inline float fp32_from_bits(uint32_t w) {
+    union {
+        uint32_t as_bits;
+        float as_value;
+    } fp32;
+    fp32.as_bits = w;
+    return fp32.as_value;
+}
+
+static inline uint32_t fp32_to_bits(float f) {
+    union {
+        float as_value;
+        uint32_t as_bits;
+    } fp32;
+    fp32.as_value = f;
+    return fp32.as_bits;
+}
+
+static inline float ggml_compute_fp16_to_fp32(ggml_fp16_t h) {
+    const uint32_t w = (uint32_t) h << 16;
+    const uint32_t sign = w & UINT32_C(0x80000000);
+    const uint32_t two_w = w + w;
+
+    const uint32_t exp_offset = UINT32_C(0xE0) << 23;
+#if (defined(__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) || defined(__GNUC__) && !defined(__STRICT_ANSI__)) && (!defined(__cplusplus) || __cplusplus >= 201703L)
+    const float exp_scale = 0x1.0p-112f;
+#else
+    const float exp_scale = fp32_from_bits(UINT32_C(0x7800000));
+#endif
+    const float normalized_value = fp32_from_bits((two_w >> 4) + exp_offset) * exp_scale;
+
+    const uint32_t magic_mask = UINT32_C(126) << 23;
+    const float magic_bias = 0.5f;
+    const float denormalized_value = fp32_from_bits((two_w >> 17) | magic_mask) - magic_bias;
+
+    const uint32_t denormalized_cutoff = UINT32_C(1) << 27;
+    const uint32_t result = sign |
+        (two_w < denormalized_cutoff ? fp32_to_bits(denormalized_value) : fp32_to_bits(normalized_value));
+    return fp32_from_bits(result);
+}
+
+static inline ggml_fp16_t ggml_compute_fp32_to_fp16(float f) {
+#if (defined(__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) || defined(__GNUC__) && !defined(__STRICT_ANSI__)) && (!defined(__cplusplus) || __cplusplus >= 201703L)
+    const float scale_to_inf = 0x1.0p+112f;
+    const float scale_to_zero = 0x1.0p-110f;
+#else
+    const float scale_to_inf = fp32_from_bits(UINT32_C(0x77800000));
+    const float scale_to_zero = fp32_from_bits(UINT32_C(0x08800000));
+#endif
+    float base = (fabsf(f) * scale_to_inf) * scale_to_zero;
+
+    const uint32_t w = fp32_to_bits(f);
+    const uint32_t shl1_w = w + w;
+    const uint32_t sign = w & UINT32_C(0x80000000);
+    uint32_t bias = shl1_w & UINT32_C(0xFF000000);
+    if (bias < UINT32_C(0x71000000)) {
+        bias = UINT32_C(0x71000000);
+    }
+
+    base = fp32_from_bits((bias >> 1) + UINT32_C(0x07800000)) + base;
+    const uint32_t bits = fp32_to_bits(base);
+    const uint32_t exp_bits = (bits >> 13) & UINT32_C(0x00007C00);
+    const uint32_t mantissa_bits = bits & UINT32_C(0x00000FFF);
+    const uint32_t nonsign = exp_bits + mantissa_bits;
+    return (sign >> 16) | (shl1_w > UINT32_C(0xFF000000) ? UINT16_C(0x7E00) : nonsign);
+}
+
+#define GGML_COMPUTE_FP16_TO_FP32(x) ggml_compute_fp16_to_fp32(x)
+#define GGML_COMPUTE_FP32_TO_FP16(x) ggml_compute_fp32_to_fp16(x)
+
+#endif // defined(__ARM_NEON) && (!defined(__MSC_VER)
+
+// precomputed f32 table for f16 (256 KB)
+// defined in ggml.c, initialized in ggml_init()
+GGML_API float ggml_table_f32_f16[1 << 16];
+
+// On ARM NEON, it's quicker to directly convert x -> x instead of calling into ggml_lookup_fp16_to_fp32,
+// so we define GGML_FP16_TO_FP32 and GGML_FP32_TO_FP16 elsewhere for NEON.
+// This is also true for POWER9.
+#if !defined(GGML_FP16_TO_FP32)
+inline static float ggml_lookup_fp16_to_fp32(ggml_fp16_t f) {
+    uint16_t s;
+    memcpy(&s, &f, sizeof(uint16_t));
+    return ggml_table_f32_f16[s];
+}
+
+#define GGML_FP16_TO_FP32(x) ggml_lookup_fp16_to_fp32(x)
+#endif
+
+#if !defined(GGML_FP32_TO_FP16)
+#define GGML_FP32_TO_FP16(x) GGML_COMPUTE_FP32_TO_FP16(x)
+#endif
+
+/**
+ * Converts brain16 to float32.
+ *
+ * The bfloat16 floating point format has the following structure:
+ *
+ *       ┌sign
+ *       │
+ *       │   ┌exponent
+ *       │   │
+ *       │   │      ┌mantissa
+ *       │   │      │
+ *       │┌──┴───┐┌─┴───┐
+ *     0b0000000000000000 brain16
+ *
+ * Since bf16 has the same number of exponent bits as a 32bit float,
+ * encoding and decoding numbers becomes relatively straightforward.
+ *
+ *       ┌sign
+ *       │
+ *       │   ┌exponent
+ *       │   │
+ *       │   │      ┌mantissa
+ *       │   │      │
+ *       │┌──┴───┐┌─┴───────────────────┐
+ *     0b00000000000000000000000000000000 IEEE binary32
+ *
+ * For comparison, the standard fp16 format has fewer exponent bits.
+ *
+ *       ┌sign
+ *       │
+ *       │  ┌exponent
+ *       │  │
+ *       │  │    ┌mantissa
+ *       │  │    │
+ *       │┌─┴─┐┌─┴──────┐
+ *     0b0000000000000000 IEEE binary16
+ *
+ * @see IEEE 754-2008
+ */
+static inline float ggml_compute_bf16_to_fp32(ggml_bf16_t h) {
+    union {
+        float f;
+        uint32_t i;
+    } u;
+    u.i = (uint32_t)h.bits << 16;
+    return u.f;
+}
+
+/**
+ * Converts float32 to brain16.
+ *
+ * This is binary identical with Google Brain float conversion.
+ * Floats shall round to nearest even, and NANs shall be quiet.
+ * Subnormals aren't flushed to zero, except perhaps when used.
+ * This code should vectorize nicely if using modern compilers.
+ */
+static inline ggml_bf16_t ggml_compute_fp32_to_bf16(float s) {
+    ggml_bf16_t h;
+    union {
+        float f;
+        uint32_t i;
+    } u;
+    u.f = s;
+    if ((u.i & 0x7fffffff) > 0x7f800000) { /* nan */
+        h.bits = (u.i >> 16) | 64; /* force to quiet */
+        return h;
+    }
+    h.bits = (u.i + (0x7fff + ((u.i >> 16) & 1))) >> 16;
+    return h;
+}
+
+#define GGML_FP32_TO_BF16(x) ggml_compute_fp32_to_bf16(x)
+#define GGML_BF16_TO_FP32(x) ggml_compute_bf16_to_fp32(x)
+
+// expose GGUF internals for test code
+
+GGML_API size_t gguf_type_size(enum gguf_type type);
+
+GGML_API struct gguf_context * gguf_init_from_file_impl(FILE * file, struct gguf_init_params params);
+
+struct gguf_buf {
+    void * data;
+    size_t size;
+    size_t offset;
+};
+GGML_API struct gguf_buf gguf_buf_init(size_t size);
+GGML_API void gguf_buf_free(struct gguf_buf buf);
+
+GGML_API void gguf_write_to_buf(const struct gguf_context * ctx, struct gguf_buf * buf, bool only_meta);
+
 #ifdef __cplusplus
 }
 #endif
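A note on the bf16 helpers this diff inlines into ggml-impl.h: ggml_compute_fp32_to_bf16 rounds to nearest even by adding 0x7fff plus the lowest retained mantissa bit before truncating to the top 16 bits, and ggml_compute_bf16_to_fp32 widens with a plain 16-bit shift because bf16 shares fp32's exponent range. The sketch below is standalone and not part of the package (bf16_t is a local stand-in for ggml_bf16_t); it replicates that logic so the rounding behavior can be checked in isolation:

// Standalone check of the bf16 round-trip logic added in ggml-impl.h.
#include <stdint.h>
#include <stdio.h>

typedef struct { uint16_t bits; } bf16_t; // local stand-in for ggml_bf16_t

static bf16_t fp32_to_bf16(float s) {
    bf16_t h;
    union { float f; uint32_t i; } u = { .f = s };
    if ((u.i & 0x7fffffff) > 0x7f800000) { // NaN: truncate and set the quiet bit
        h.bits = (u.i >> 16) | 64;
        return h;
    }
    // round to nearest even: add 0x7fff plus the lowest kept bit, then truncate
    h.bits = (u.i + (0x7fff + ((u.i >> 16) & 1))) >> 16;
    return h;
}

static float bf16_to_fp32(bf16_t h) {
    union { float f; uint32_t i; } u;
    u.i = (uint32_t) h.bits << 16; // widening is a pure shift
    return u.f;
}

int main(void) {
    float x = 1.0f / 3.0f;
    float y = bf16_to_fp32(fp32_to_bf16(x));
    printf("%.9f -> %.9f\n", x, y); // 0.333333343 -> 0.333984375 (7 mantissa bits survive)
    return 0;
}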
package/src/llama.cpp/ggml/src/ggml-kompute/CMakeLists.txt (new file)

@@ -0,0 +1,166 @@
+
+find_package(Vulkan COMPONENTS glslc REQUIRED)
+find_program(glslc_executable NAMES glslc HINTS Vulkan::glslc)
+
+if (NOT glslc_executable)
+    message(FATAL_ERROR "glslc not found")
+endif()
+
+ggml_add_backend_library(ggml-kompute
+                         ggml-kompute.cpp
+                         ../../include/ggml-kompute.h
+                        )
+
+target_link_libraries(ggml-kompute PRIVATE ggml-base kompute)
+target_include_directories(ggml-kompute PRIVATE ${CMAKE_CURRENT_BINARY_DIR})
+
+add_compile_definitions(VULKAN_HPP_DISPATCH_LOADER_DYNAMIC=1)
+
+function(compile_shader)
+    set(options)
+    set(oneValueArgs)
+    set(multiValueArgs SOURCES)
+    cmake_parse_arguments(compile_shader "${options}" "${oneValueArgs}" "${multiValueArgs}" ${ARGN})
+    foreach(source ${compile_shader_SOURCES})
+        get_filename_component(filename ${source} NAME)
+        set(spv_file ${filename}.spv)
+        add_custom_command(
+            OUTPUT ${spv_file}
+            DEPENDS ${CMAKE_CURRENT_SOURCE_DIR}/${source}
+                ${CMAKE_CURRENT_SOURCE_DIR}/kompute-shaders/common.comp
+                ${CMAKE_CURRENT_SOURCE_DIR}/kompute-shaders/op_getrows.comp
+                ${CMAKE_CURRENT_SOURCE_DIR}/kompute-shaders/op_mul_mv_q_n_pre.comp
+                ${CMAKE_CURRENT_SOURCE_DIR}/kompute-shaders/op_mul_mv_q_n.comp
+            COMMAND ${glslc_executable} --target-env=vulkan1.2 -o ${spv_file} ${CMAKE_CURRENT_SOURCE_DIR}/${source}
+            COMMENT "Compiling ${source} to ${spv_file}"
+        )
+
+        get_filename_component(RAW_FILE_NAME ${spv_file} NAME)
+        set(FILE_NAME "shader${RAW_FILE_NAME}")
+        string(REPLACE ".comp.spv" ".h" HEADER_FILE ${FILE_NAME})
+        string(TOUPPER ${HEADER_FILE} HEADER_FILE_DEFINE)
+        string(REPLACE "." "_" HEADER_FILE_DEFINE "${HEADER_FILE_DEFINE}")
+        set(OUTPUT_HEADER_FILE "${HEADER_FILE}")
+        message(STATUS "${HEADER_FILE} generating ${HEADER_FILE_DEFINE}")
+        if(CMAKE_GENERATOR MATCHES "Visual Studio")
+            add_custom_command(
+                OUTPUT ${OUTPUT_HEADER_FILE}
+                COMMAND ${CMAKE_COMMAND} -E echo "/*THIS FILE HAS BEEN AUTOMATICALLY GENERATED - DO NOT EDIT*/" > ${OUTPUT_HEADER_FILE}
+                COMMAND ${CMAKE_COMMAND} -E echo \"\#ifndef ${HEADER_FILE_DEFINE}\" >> ${OUTPUT_HEADER_FILE}
+                COMMAND ${CMAKE_COMMAND} -E echo \"\#define ${HEADER_FILE_DEFINE}\" >> ${OUTPUT_HEADER_FILE}
+                COMMAND ${CMAKE_COMMAND} -E echo "namespace kp {" >> ${OUTPUT_HEADER_FILE}
+                COMMAND ${CMAKE_COMMAND} -E echo "namespace shader_data {" >> ${OUTPUT_HEADER_FILE}
+                COMMAND ${CMAKE_BINARY_DIR}/bin/$<CONFIG>/xxd -i ${RAW_FILE_NAME} >> ${OUTPUT_HEADER_FILE}
+                COMMAND ${CMAKE_COMMAND} -E echo "}}" >> ${OUTPUT_HEADER_FILE}
+                COMMAND ${CMAKE_COMMAND} -E echo \"\#endif // define ${HEADER_FILE_DEFINE}\" >> ${OUTPUT_HEADER_FILE}
+                DEPENDS ${spv_file} xxd
+                COMMENT "Converting to hpp: ${FILE_NAME} ${CMAKE_BINARY_DIR}/bin/$<CONFIG>/xxd"
+            )
+        else()
+            add_custom_command(
+                OUTPUT ${OUTPUT_HEADER_FILE}
+                COMMAND ${CMAKE_COMMAND} -E echo "/*THIS FILE HAS BEEN AUTOMATICALLY GENERATED - DO NOT EDIT*/" > ${OUTPUT_HEADER_FILE}
+                COMMAND ${CMAKE_COMMAND} -E echo \"\#ifndef ${HEADER_FILE_DEFINE}\" >> ${OUTPUT_HEADER_FILE}
+                COMMAND ${CMAKE_COMMAND} -E echo \"\#define ${HEADER_FILE_DEFINE}\" >> ${OUTPUT_HEADER_FILE}
+                COMMAND ${CMAKE_COMMAND} -E echo "namespace kp {" >> ${OUTPUT_HEADER_FILE}
+                COMMAND ${CMAKE_COMMAND} -E echo "namespace shader_data {" >> ${OUTPUT_HEADER_FILE}
+                COMMAND ${CMAKE_BINARY_DIR}/bin/xxd -i ${RAW_FILE_NAME} >> ${OUTPUT_HEADER_FILE}
+                COMMAND ${CMAKE_COMMAND} -E echo "}}" >> ${OUTPUT_HEADER_FILE}
+                COMMAND ${CMAKE_COMMAND} -E echo \"\#endif // define ${HEADER_FILE_DEFINE}\" >> ${OUTPUT_HEADER_FILE}
+                DEPENDS ${spv_file} xxd
+                COMMENT "Converting to hpp: ${FILE_NAME} ${CMAKE_BINARY_DIR}/bin/xxd"
+            )
+        endif()
+    endforeach()
+endfunction()
+
+if (EXISTS "${CMAKE_CURRENT_SOURCE_DIR}/kompute/CMakeLists.txt")
+    message(STATUS "Kompute found")
+    set(KOMPUTE_OPT_LOG_LEVEL Error CACHE STRING "Kompute log level")
+    add_subdirectory(kompute)
+
+    # Compile our shaders
+    compile_shader(SOURCES
+        kompute-shaders/op_scale.comp
+        kompute-shaders/op_scale_8.comp
+        kompute-shaders/op_add.comp
+        kompute-shaders/op_addrow.comp
+        kompute-shaders/op_mul.comp
+        kompute-shaders/op_silu.comp
+        kompute-shaders/op_relu.comp
+        kompute-shaders/op_gelu.comp
+        kompute-shaders/op_softmax.comp
+        kompute-shaders/op_norm.comp
+        kompute-shaders/op_rmsnorm.comp
+        kompute-shaders/op_diagmask.comp
+        kompute-shaders/op_mul_mat_mat_f32.comp
+        kompute-shaders/op_mul_mat_f16.comp
+        kompute-shaders/op_mul_mat_q8_0.comp
+        kompute-shaders/op_mul_mat_q4_0.comp
+        kompute-shaders/op_mul_mat_q4_1.comp
+        kompute-shaders/op_mul_mat_q4_k.comp
+        kompute-shaders/op_mul_mat_q6_k.comp
+        kompute-shaders/op_getrows_f32.comp
+        kompute-shaders/op_getrows_f16.comp
+        kompute-shaders/op_getrows_q4_0.comp
+        kompute-shaders/op_getrows_q4_1.comp
+        kompute-shaders/op_getrows_q6_k.comp
+        kompute-shaders/op_rope_norm_f16.comp
+        kompute-shaders/op_rope_norm_f32.comp
+        kompute-shaders/op_rope_neox_f16.comp
+        kompute-shaders/op_rope_neox_f32.comp
+        kompute-shaders/op_cpy_f16_f16.comp
+        kompute-shaders/op_cpy_f16_f32.comp
+        kompute-shaders/op_cpy_f32_f16.comp
+        kompute-shaders/op_cpy_f32_f32.comp
+    )
+
+    # Create a custom target for our generated shaders
+    add_custom_target(generated_shaders DEPENDS
+        shaderop_scale.h
+        shaderop_scale_8.h
+        shaderop_add.h
+        shaderop_addrow.h
+        shaderop_mul.h
+        shaderop_silu.h
+        shaderop_relu.h
+        shaderop_gelu.h
+        shaderop_softmax.h
+        shaderop_norm.h
+        shaderop_rmsnorm.h
+        shaderop_diagmask.h
+        shaderop_mul_mat_mat_f32.h
+        shaderop_mul_mat_f16.h
+        shaderop_mul_mat_q8_0.h
+        shaderop_mul_mat_q4_0.h
+        shaderop_mul_mat_q4_1.h
+        shaderop_mul_mat_q4_k.h
+        shaderop_mul_mat_q6_k.h
+        shaderop_getrows_f32.h
+        shaderop_getrows_f16.h
+        shaderop_getrows_q4_0.h
+        shaderop_getrows_q4_1.h
+        shaderop_getrows_q6_k.h
+        shaderop_rope_norm_f16.h
+        shaderop_rope_norm_f32.h
+        shaderop_rope_neox_f16.h
+        shaderop_rope_neox_f32.h
+        shaderop_cpy_f16_f16.h
+        shaderop_cpy_f16_f32.h
+        shaderop_cpy_f32_f16.h
+        shaderop_cpy_f32_f32.h
+    )
+
+    # Create a custom command that depends on the generated_shaders
+    add_custom_command(
+        OUTPUT ${CMAKE_CURRENT_BINARY_DIR}/ggml-kompute.stamp
+        COMMAND ${CMAKE_COMMAND} -E touch ${CMAKE_CURRENT_BINARY_DIR}/ggml-kompute.stamp
+        DEPENDS generated_shaders
+        COMMENT "Ensuring shaders are generated before compiling ggml-kompute.cpp"
+    )
+
+    # Add the stamp to the main sources to ensure dependency tracking
+    target_sources(ggml-kompute PRIVATE ${CMAKE_CURRENT_BINARY_DIR}/ggml-kompute.stamp)
+else()
+    message(WARNING "Kompute not found")
+endif()