@fugood/llama.node 0.3.16 → 0.4.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/CMakeLists.txt +6 -1
- package/bin/darwin/arm64/llama-node.node +0 -0
- package/bin/darwin/x64/llama-node.node +0 -0
- package/bin/linux/arm64/llama-node.node +0 -0
- package/bin/linux/x64/llama-node.node +0 -0
- package/bin/linux-cuda/arm64/llama-node.node +0 -0
- package/bin/linux-cuda/x64/llama-node.node +0 -0
- package/bin/linux-vulkan/arm64/llama-node.node +0 -0
- package/bin/linux-vulkan/x64/llama-node.node +0 -0
- package/bin/win32/arm64/llama-node.node +0 -0
- package/bin/win32/arm64/node.lib +0 -0
- package/bin/win32/x64/llama-node.node +0 -0
- package/bin/win32/x64/node.lib +0 -0
- package/bin/win32-vulkan/arm64/llama-node.node +0 -0
- package/bin/win32-vulkan/arm64/node.lib +0 -0
- package/bin/win32-vulkan/x64/llama-node.node +0 -0
- package/bin/win32-vulkan/x64/node.lib +0 -0
- package/lib/binding.ts +44 -2
- package/lib/index.js +132 -1
- package/lib/index.ts +203 -3
- package/package.json +2 -1
- package/src/EmbeddingWorker.cpp +1 -1
- package/src/LlamaCompletionWorker.cpp +374 -19
- package/src/LlamaCompletionWorker.h +31 -10
- package/src/LlamaContext.cpp +216 -7
- package/src/LlamaContext.h +12 -0
- package/src/common.hpp +15 -0
- package/src/llama.cpp/.github/workflows/build-linux-cross.yml +233 -0
- package/src/llama.cpp/.github/workflows/build.yml +89 -767
- package/src/llama.cpp/.github/workflows/docker.yml +9 -6
- package/src/llama.cpp/.github/workflows/release.yml +716 -0
- package/src/llama.cpp/.github/workflows/server.yml +19 -23
- package/src/llama.cpp/CMakeLists.txt +11 -1
- package/src/llama.cpp/cmake/build-info.cmake +8 -2
- package/src/llama.cpp/cmake/x64-windows-llvm.cmake +0 -6
- package/src/llama.cpp/common/CMakeLists.txt +35 -4
- package/src/llama.cpp/common/arg.cpp +844 -121
- package/src/llama.cpp/common/arg.h +9 -0
- package/src/llama.cpp/common/chat.cpp +129 -107
- package/src/llama.cpp/common/chat.h +2 -0
- package/src/llama.cpp/common/common.cpp +64 -518
- package/src/llama.cpp/common/common.h +35 -45
- package/src/llama.cpp/common/json-schema-to-grammar.cpp +3 -0
- package/src/llama.cpp/common/llguidance.cpp +31 -47
- package/src/llama.cpp/common/minja/chat-template.hpp +23 -11
- package/src/llama.cpp/common/minja/minja.hpp +186 -127
- package/src/llama.cpp/common/regex-partial.cpp +204 -0
- package/src/llama.cpp/common/regex-partial.h +56 -0
- package/src/llama.cpp/common/sampling.cpp +60 -50
- package/src/llama.cpp/docs/build.md +122 -7
- package/src/llama.cpp/examples/CMakeLists.txt +2 -32
- package/src/llama.cpp/examples/batched/batched.cpp +1 -1
- package/src/llama.cpp/examples/embedding/embedding.cpp +9 -12
- package/src/llama.cpp/examples/gritlm/gritlm.cpp +1 -1
- package/src/llama.cpp/examples/llama.android/llama/build.gradle.kts +1 -0
- package/src/llama.cpp/examples/parallel/parallel.cpp +89 -15
- package/src/llama.cpp/examples/passkey/passkey.cpp +1 -1
- package/src/llama.cpp/examples/speculative/speculative.cpp +1 -1
- package/src/llama.cpp/examples/speculative-simple/speculative-simple.cpp +1 -1
- package/src/llama.cpp/examples/sycl/build.sh +2 -2
- package/src/llama.cpp/examples/sycl/win-build-sycl.bat +2 -2
- package/src/llama.cpp/examples/training/CMakeLists.txt +5 -0
- package/src/llama.cpp/examples/training/finetune.cpp +96 -0
- package/src/llama.cpp/ggml/CMakeLists.txt +35 -2
- package/src/llama.cpp/ggml/cmake/GitVars.cmake +22 -0
- package/src/llama.cpp/ggml/include/ggml-backend.h +4 -4
- package/src/llama.cpp/ggml/include/ggml-cpp.h +1 -1
- package/src/llama.cpp/ggml/include/ggml-cpu.h +5 -0
- package/src/llama.cpp/ggml/include/ggml-opt.h +47 -28
- package/src/llama.cpp/ggml/include/ggml-rpc.h +6 -1
- package/src/llama.cpp/ggml/include/ggml.h +76 -106
- package/src/llama.cpp/ggml/src/CMakeLists.txt +11 -8
- package/src/llama.cpp/ggml/src/ggml-alloc.c +4 -1
- package/src/llama.cpp/ggml/src/ggml-backend.cpp +9 -5
- package/src/llama.cpp/ggml/src/ggml-cann/CMakeLists.txt +0 -2
- package/src/llama.cpp/ggml/src/ggml-cann/acl_tensor.cpp +8 -4
- package/src/llama.cpp/ggml/src/ggml-cann/acl_tensor.h +5 -5
- package/src/llama.cpp/ggml/src/ggml-cann/aclnn_ops.cpp +692 -1534
- package/src/llama.cpp/ggml/src/ggml-cann/aclnn_ops.h +613 -122
- package/src/llama.cpp/ggml/src/ggml-cann/common.h +135 -1
- package/src/llama.cpp/ggml/src/ggml-cann/ggml-cann.cpp +507 -137
- package/src/llama.cpp/ggml/src/ggml-common.h +12 -6
- package/src/llama.cpp/ggml/src/ggml-cpu/CMakeLists.txt +66 -33
- package/src/llama.cpp/ggml/src/ggml-cpu/binary-ops.cpp +158 -0
- package/src/llama.cpp/ggml/src/ggml-cpu/binary-ops.h +16 -0
- package/src/llama.cpp/ggml/src/ggml-cpu/common.h +72 -0
- package/src/llama.cpp/ggml/src/ggml-cpu/cpu-feats-x86.cpp +1 -1
- package/src/llama.cpp/ggml/src/ggml-cpu/ggml-cpu-aarch64.cpp +896 -194
- package/src/llama.cpp/ggml/src/ggml-cpu/ggml-cpu-impl.h +2 -21
- package/src/llama.cpp/ggml/src/ggml-cpu/ggml-cpu-quants.c +1060 -410
- package/src/llama.cpp/ggml/src/ggml-cpu/ggml-cpu.c +1008 -13533
- package/src/llama.cpp/ggml/src/ggml-cpu/ggml-cpu.cpp +31 -16
- package/src/llama.cpp/ggml/src/ggml-cpu/kleidiai/kernels.cpp +90 -12
- package/src/llama.cpp/ggml/src/ggml-cpu/kleidiai/kernels.h +47 -13
- package/src/llama.cpp/ggml/src/ggml-cpu/kleidiai/kleidiai.cpp +266 -72
- package/src/llama.cpp/ggml/src/ggml-cpu/llamafile/sgemm.cpp +1034 -88
- package/src/llama.cpp/ggml/src/ggml-cpu/ops.cpp +8796 -0
- package/src/llama.cpp/ggml/src/ggml-cpu/ops.h +110 -0
- package/src/llama.cpp/ggml/src/ggml-cpu/simd-mappings.h +892 -0
- package/src/llama.cpp/ggml/src/ggml-cpu/unary-ops.cpp +186 -0
- package/src/llama.cpp/ggml/src/ggml-cpu/unary-ops.h +28 -0
- package/src/llama.cpp/ggml/src/ggml-cpu/vec.cpp +252 -0
- package/src/llama.cpp/ggml/src/ggml-cpu/vec.h +802 -0
- package/src/llama.cpp/ggml/src/ggml-cuda/CMakeLists.txt +23 -4
- package/src/llama.cpp/ggml/src/ggml-cuda/vendors/hip.h +7 -0
- package/src/llama.cpp/ggml/src/ggml-cuda/vendors/musa.h +1 -0
- package/src/llama.cpp/ggml/src/ggml-hip/CMakeLists.txt +0 -4
- package/src/llama.cpp/ggml/src/ggml-impl.h +52 -18
- package/src/llama.cpp/ggml/src/ggml-metal/ggml-metal-impl.h +106 -14
- package/src/llama.cpp/ggml/src/ggml-opencl/CMakeLists.txt +67 -119
- package/src/llama.cpp/ggml/src/ggml-opencl/ggml-opencl.cpp +1023 -262
- package/src/llama.cpp/ggml/src/ggml-opt.cpp +368 -190
- package/src/llama.cpp/ggml/src/ggml-quants.c +0 -6
- package/src/llama.cpp/ggml/src/ggml-rpc/ggml-rpc.cpp +307 -40
- package/src/llama.cpp/ggml/src/ggml-sycl/CMakeLists.txt +125 -45
- package/src/llama.cpp/ggml/src/ggml-sycl/backend.hpp +10 -8
- package/src/llama.cpp/ggml/src/ggml-sycl/binbcast.cpp +239 -0
- package/src/llama.cpp/ggml/src/ggml-sycl/binbcast.hpp +39 -0
- package/src/llama.cpp/ggml/src/ggml-sycl/common.cpp +0 -35
- package/src/llama.cpp/ggml/src/ggml-sycl/common.hpp +9 -307
- package/src/llama.cpp/ggml/src/ggml-sycl/convert.cpp +72 -25
- package/src/llama.cpp/ggml/src/ggml-sycl/convert.hpp +14 -7
- package/src/llama.cpp/ggml/src/ggml-sycl/dequantize.hpp +59 -21
- package/src/llama.cpp/ggml/src/ggml-sycl/dmmv.cpp +7 -1
- package/src/llama.cpp/ggml/src/ggml-sycl/dpct/helper.hpp +79 -90
- package/src/llama.cpp/ggml/src/ggml-sycl/element_wise.cpp +944 -438
- package/src/llama.cpp/ggml/src/ggml-sycl/element_wise.hpp +22 -23
- package/src/llama.cpp/ggml/src/ggml-sycl/gemm.hpp +37 -8
- package/src/llama.cpp/ggml/src/ggml-sycl/getrows.cpp +24 -20
- package/src/llama.cpp/ggml/src/ggml-sycl/getrows.hpp +1 -4
- package/src/llama.cpp/ggml/src/ggml-sycl/ggml-sycl.cpp +507 -411
- package/src/llama.cpp/ggml/src/ggml-sycl/im2col.cpp +84 -74
- package/src/llama.cpp/ggml/src/ggml-sycl/im2col.hpp +1 -3
- package/src/llama.cpp/ggml/src/ggml-sycl/mmvq.cpp +185 -89
- package/src/llama.cpp/ggml/src/ggml-sycl/norm.cpp +37 -49
- package/src/llama.cpp/ggml/src/ggml-sycl/norm.hpp +7 -22
- package/src/llama.cpp/ggml/src/ggml-sycl/outprod.cpp +4 -14
- package/src/llama.cpp/ggml/src/ggml-sycl/quants.hpp +83 -0
- package/src/llama.cpp/ggml/src/ggml-sycl/rope.cpp +204 -118
- package/src/llama.cpp/ggml/src/ggml-sycl/rope.hpp +1 -3
- package/src/llama.cpp/ggml/src/ggml-sycl/vecdotq.hpp +128 -53
- package/src/llama.cpp/ggml/src/ggml-vulkan/CMakeLists.txt +83 -49
- package/src/llama.cpp/ggml/src/ggml-vulkan/ggml-vulkan.cpp +1278 -282
- package/src/llama.cpp/ggml/src/ggml-vulkan/vulkan-shaders/CMakeLists.txt +32 -0
- package/src/llama.cpp/ggml/src/ggml-vulkan/vulkan-shaders/vulkan-shaders-gen.cpp +133 -30
- package/src/llama.cpp/ggml/src/ggml.c +170 -265
- package/src/llama.cpp/ggml/src/gguf.cpp +34 -33
- package/src/llama.cpp/include/llama.h +82 -22
- package/src/llama.cpp/models/ggml-vocab-llama4.gguf.inp +112 -0
- package/src/llama.cpp/models/ggml-vocab-llama4.gguf.out +46 -0
- package/src/llama.cpp/models/ggml-vocab-pixtral.gguf.inp +112 -0
- package/src/llama.cpp/models/ggml-vocab-pixtral.gguf.out +46 -0
- package/src/llama.cpp/requirements/requirements-all.txt +5 -3
- package/src/llama.cpp/requirements/requirements-gguf_editor_gui.txt +3 -0
- package/src/llama.cpp/scripts/xxd.cmake +1 -1
- package/src/llama.cpp/src/CMakeLists.txt +4 -2
- package/src/llama.cpp/src/llama-adapter.cpp +43 -1
- package/src/llama.cpp/src/llama-arch.cpp +163 -17
- package/src/llama.cpp/src/llama-arch.h +16 -0
- package/src/llama.cpp/src/llama-batch.cpp +5 -1
- package/src/llama.cpp/src/llama-batch.h +2 -1
- package/src/llama.cpp/src/llama-chat.cpp +91 -16
- package/src/llama.cpp/src/llama-chat.h +7 -2
- package/src/llama.cpp/src/llama-context.cpp +479 -575
- package/src/llama.cpp/src/llama-context.h +44 -33
- package/src/llama.cpp/src/llama-cparams.h +1 -0
- package/src/llama.cpp/src/llama-graph.cpp +209 -157
- package/src/llama.cpp/src/llama-graph.h +38 -14
- package/src/llama.cpp/src/llama-hparams.h +13 -0
- package/src/llama.cpp/src/llama-kv-cache.cpp +1604 -543
- package/src/llama.cpp/src/llama-kv-cache.h +283 -171
- package/src/llama.cpp/src/llama-memory.h +12 -2
- package/src/llama.cpp/src/llama-mmap.cpp +1 -1
- package/src/llama.cpp/src/llama-model-loader.cpp +34 -20
- package/src/llama.cpp/src/llama-model-loader.h +5 -3
- package/src/llama.cpp/src/llama-model-saver.cpp +281 -0
- package/src/llama.cpp/src/llama-model-saver.h +37 -0
- package/src/llama.cpp/src/llama-model.cpp +1803 -330
- package/src/llama.cpp/src/llama-model.h +21 -2
- package/src/llama.cpp/src/llama-quant.cpp +33 -10
- package/src/llama.cpp/src/llama-sampling.cpp +25 -7
- package/src/llama.cpp/src/llama-vocab.cpp +86 -10
- package/src/llama.cpp/src/llama-vocab.h +6 -0
- package/src/llama.cpp/src/llama.cpp +15 -1
- package/src/llama.cpp/tests/CMakeLists.txt +52 -31
- package/src/llama.cpp/tests/test-arg-parser.cpp +51 -4
- package/src/llama.cpp/tests/test-backend-ops.cpp +189 -90
- package/src/llama.cpp/tests/test-chat-template.cpp +26 -6
- package/src/llama.cpp/tests/test-chat.cpp +15 -3
- package/src/llama.cpp/{examples/gbnf-validator/gbnf-validator.cpp → tests/test-gbnf-validator.cpp} +2 -2
- package/src/llama.cpp/tests/test-grammar-integration.cpp +3 -2
- package/src/llama.cpp/tests/test-grammar-llguidance.cpp +63 -2
- package/src/llama.cpp/tests/test-grammar-parser.cpp +3 -1
- package/src/llama.cpp/tests/test-json-schema-to-grammar.cpp +17 -1
- package/src/llama.cpp/tests/test-llama-grammar.cpp +2 -1
- package/src/llama.cpp/tests/test-mtmd-c-api.c +63 -0
- package/src/llama.cpp/tests/test-opt.cpp +33 -21
- package/src/llama.cpp/{examples/quantize-stats/quantize-stats.cpp → tests/test-quantize-stats.cpp} +3 -1
- package/src/llama.cpp/tests/test-regex-partial.cpp +288 -0
- package/src/llama.cpp/tests/test-sampling.cpp +1 -1
- package/src/llama.cpp/tests/test-tokenizer-1-bpe.cpp +2 -1
- package/src/llama.cpp/tests/test-tokenizer-1-spm.cpp +2 -1
- package/src/llama.cpp/tools/CMakeLists.txt +39 -0
- package/src/llama.cpp/{examples → tools}/batched-bench/batched-bench.cpp +3 -3
- package/src/llama.cpp/{examples → tools}/export-lora/export-lora.cpp +1 -1
- package/src/llama.cpp/{examples → tools}/gguf-split/gguf-split.cpp +15 -16
- package/src/llama.cpp/{examples → tools}/imatrix/imatrix.cpp +11 -9
- package/src/llama.cpp/{examples → tools}/llama-bench/llama-bench.cpp +623 -274
- package/src/llama.cpp/{examples → tools}/main/main.cpp +22 -14
- package/src/llama.cpp/tools/mtmd/CMakeLists.txt +47 -0
- package/src/llama.cpp/tools/mtmd/clip-impl.h +365 -0
- package/src/llama.cpp/tools/mtmd/clip.cpp +3646 -0
- package/src/llama.cpp/tools/mtmd/clip.h +99 -0
- package/src/llama.cpp/tools/mtmd/deprecation-warning.cpp +22 -0
- package/src/llama.cpp/tools/mtmd/mtmd-cli.cpp +370 -0
- package/src/llama.cpp/tools/mtmd/mtmd-helper.cpp +310 -0
- package/src/llama.cpp/tools/mtmd/mtmd.cpp +678 -0
- package/src/llama.cpp/tools/mtmd/mtmd.h +331 -0
- package/src/llama.cpp/{examples → tools}/perplexity/perplexity.cpp +21 -5
- package/src/llama.cpp/{examples → tools}/quantize/quantize.cpp +53 -3
- package/src/llama.cpp/tools/rpc/CMakeLists.txt +4 -0
- package/src/llama.cpp/tools/rpc/rpc-server.cpp +322 -0
- package/src/llama.cpp/tools/run/CMakeLists.txt +16 -0
- package/src/llama.cpp/{examples → tools}/run/run.cpp +30 -30
- package/src/llama.cpp/{examples → tools}/server/CMakeLists.txt +2 -1
- package/src/llama.cpp/{examples → tools}/server/httplib.h +313 -247
- package/src/llama.cpp/{examples → tools}/server/server.cpp +529 -215
- package/src/llama.cpp/{examples → tools}/server/utils.hpp +427 -6
- package/src/llama.cpp/{examples → tools}/tts/tts.cpp +6 -9
- package/src/llama.cpp/cmake/arm64-windows-msvc.cmake +0 -6
- package/src/llama.cpp/examples/gbnf-validator/CMakeLists.txt +0 -5
- package/src/llama.cpp/examples/infill/CMakeLists.txt +0 -5
- package/src/llama.cpp/examples/infill/infill.cpp +0 -590
- package/src/llama.cpp/examples/llava/CMakeLists.txt +0 -66
- package/src/llama.cpp/examples/llava/android/build_64.sh +0 -8
- package/src/llama.cpp/examples/llava/clip-quantize-cli.cpp +0 -59
- package/src/llama.cpp/examples/llava/clip.cpp +0 -3206
- package/src/llama.cpp/examples/llava/clip.h +0 -118
- package/src/llama.cpp/examples/llava/gemma3-cli.cpp +0 -341
- package/src/llama.cpp/examples/llava/llava-cli.cpp +0 -332
- package/src/llama.cpp/examples/llava/llava.cpp +0 -574
- package/src/llama.cpp/examples/llava/llava.h +0 -49
- package/src/llama.cpp/examples/llava/minicpmv-cli.cpp +0 -354
- package/src/llama.cpp/examples/llava/qwen2vl-cli.cpp +0 -584
- package/src/llama.cpp/examples/quantize-stats/CMakeLists.txt +0 -6
- package/src/llama.cpp/examples/rpc/CMakeLists.txt +0 -2
- package/src/llama.cpp/examples/rpc/rpc-server.cpp +0 -171
- package/src/llama.cpp/examples/run/CMakeLists.txt +0 -5
- package/src/llama.cpp/ggml/src/ggml-cann/kernels/CMakeLists.txt +0 -30
- package/src/llama.cpp/ggml/src/ggml-cann/kernels/ascendc_kernels.h +0 -19
- package/src/llama.cpp/ggml/src/ggml-cann/kernels/dup.cpp +0 -234
- package/src/llama.cpp/ggml/src/ggml-cann/kernels/get_row_f16.cpp +0 -197
- package/src/llama.cpp/ggml/src/ggml-cann/kernels/get_row_f32.cpp +0 -190
- package/src/llama.cpp/ggml/src/ggml-cann/kernels/get_row_q4_0.cpp +0 -204
- package/src/llama.cpp/ggml/src/ggml-cann/kernels/get_row_q8_0.cpp +0 -191
- package/src/llama.cpp/ggml/src/ggml-cann/kernels/quantize_f16_q8_0.cpp +0 -218
- package/src/llama.cpp/ggml/src/ggml-cann/kernels/quantize_f32_q8_0.cpp +0 -216
- package/src/llama.cpp/ggml/src/ggml-cann/kernels/quantize_float_to_q4_0.cpp +0 -295
- /package/src/llama.cpp/{examples → tools}/batched-bench/CMakeLists.txt +0 -0
- /package/src/llama.cpp/{examples → tools}/cvector-generator/CMakeLists.txt +0 -0
- /package/src/llama.cpp/{examples → tools}/cvector-generator/completions.txt +0 -0
- /package/src/llama.cpp/{examples → tools}/cvector-generator/cvector-generator.cpp +0 -0
- /package/src/llama.cpp/{examples → tools}/cvector-generator/mean.hpp +0 -0
- /package/src/llama.cpp/{examples → tools}/cvector-generator/negative.txt +0 -0
- /package/src/llama.cpp/{examples → tools}/cvector-generator/pca.hpp +0 -0
- /package/src/llama.cpp/{examples → tools}/cvector-generator/positive.txt +0 -0
- /package/src/llama.cpp/{examples → tools}/export-lora/CMakeLists.txt +0 -0
- /package/src/llama.cpp/{examples → tools}/gguf-split/CMakeLists.txt +0 -0
- /package/src/llama.cpp/{examples → tools}/imatrix/CMakeLists.txt +0 -0
- /package/src/llama.cpp/{examples → tools}/llama-bench/CMakeLists.txt +0 -0
- /package/src/llama.cpp/{examples → tools}/main/CMakeLists.txt +0 -0
- /package/src/llama.cpp/{examples/llava → tools/mtmd}/requirements.txt +0 -0
- /package/src/llama.cpp/{examples → tools}/perplexity/CMakeLists.txt +0 -0
- /package/src/llama.cpp/{examples → tools}/quantize/CMakeLists.txt +0 -0
- /package/src/llama.cpp/{examples → tools}/run/linenoise.cpp/linenoise.cpp +0 -0
- /package/src/llama.cpp/{examples → tools}/run/linenoise.cpp/linenoise.h +0 -0
- /package/src/llama.cpp/{examples → tools}/server/bench/requirements.txt +0 -0
- /package/src/llama.cpp/{examples → tools}/server/tests/requirements.txt +0 -0
- /package/src/llama.cpp/{examples → tools}/tokenize/CMakeLists.txt +0 -0
- /package/src/llama.cpp/{examples → tools}/tokenize/tokenize.cpp +0 -0
- /package/src/llama.cpp/{examples → tools}/tts/CMakeLists.txt +0 -0
|
@@ -0,0 +1,331 @@
|
|
|
1
|
+
#ifndef MTMD_H
|
|
2
|
+
#define MTMD_H
|
|
3
|
+
|
|
4
|
+
#include "ggml.h"
|
|
5
|
+
#include "llama.h"
|
|
6
|
+
#include "clip.h"
|
|
7
|
+
|
|
8
|
+
#include <stddef.h>
|
|
9
|
+
#include <stdint.h>
|
|
10
|
+
#include <stdbool.h>
|
|
11
|
+
|
|
12
|
+
#ifdef __cplusplus
|
|
13
|
+
#include <string>
|
|
14
|
+
#include <vector>
|
|
15
|
+
#include <cinttypes>
|
|
16
|
+
#include <memory>
|
|
17
|
+
#endif
|
|
18
|
+
|
|
19
|
+
/**
|
|
20
|
+
* libmtmd: A library for multimodal support in llama.cpp.
|
|
21
|
+
*
|
|
22
|
+
* WARNING: This API is experimental and subject to many BREAKING CHANGES.
|
|
23
|
+
* Issues related to API usage may receive lower priority support.
|
|
24
|
+
*
|
|
25
|
+
* For the usage, see an example in mtmd-cli.cpp
|
|
26
|
+
*/
|
|
27
|
+
|
|
28
|
+
#ifdef LLAMA_SHARED
|
|
29
|
+
# if defined(_WIN32) && !defined(__MINGW32__)
|
|
30
|
+
# ifdef LLAMA_BUILD
|
|
31
|
+
# define MTMD_API __declspec(dllexport)
|
|
32
|
+
# else
|
|
33
|
+
# define MTMD_API __declspec(dllimport)
|
|
34
|
+
# endif
|
|
35
|
+
# else
|
|
36
|
+
# define MTMD_API __attribute__ ((visibility ("default")))
|
|
37
|
+
# endif
|
|
38
|
+
#else
|
|
39
|
+
# define MTMD_API
|
|
40
|
+
#endif
|
|
41
|
+
|
|
42
|
+
#define MTMD_DEFAULT_IMAGE_MARKER "<__image__>"
|
|
43
|
+
|
|
44
|
+
#ifdef __cplusplus
|
|
45
|
+
extern "C" {
|
|
46
|
+
#endif
|
|
47
|
+
|
|
48
|
+
enum mtmd_input_chunk_type {
|
|
49
|
+
MTMD_INPUT_CHUNK_TYPE_TEXT,
|
|
50
|
+
MTMD_INPUT_CHUNK_TYPE_IMAGE,
|
|
51
|
+
};
|
|
52
|
+
|
|
53
|
+
// opaque types
|
|
54
|
+
struct mtmd_context;
|
|
55
|
+
struct mtmd_bitmap;
|
|
56
|
+
struct mtmd_image_tokens;
|
|
57
|
+
struct mtmd_input_chunk;
|
|
58
|
+
struct mtmd_input_chunks;
|
|
59
|
+
|
|
60
|
+
struct mtmd_input_text {
|
|
61
|
+
const char * text;
|
|
62
|
+
bool add_special;
|
|
63
|
+
bool parse_special;
|
|
64
|
+
};
|
|
65
|
+
|
|
66
|
+
//
|
|
67
|
+
// C API
|
|
68
|
+
//
|
|
69
|
+
|
|
70
|
+
typedef struct mtmd_context mtmd_context;
|
|
71
|
+
typedef struct mtmd_bitmap mtmd_bitmap;
|
|
72
|
+
typedef struct mtmd_image_tokens mtmd_image_tokens;
|
|
73
|
+
typedef struct mtmd_input_chunk mtmd_input_chunk;
|
|
74
|
+
typedef struct mtmd_input_chunks mtmd_input_chunks;
|
|
75
|
+
typedef struct mtmd_input_text mtmd_input_text;
|
|
76
|
+
|
|
77
|
+
struct mtmd_context_params {
|
|
78
|
+
bool use_gpu;
|
|
79
|
+
bool print_timings;
|
|
80
|
+
int n_threads;
|
|
81
|
+
enum ggml_log_level verbosity;
|
|
82
|
+
const char * image_marker;
|
|
83
|
+
};
|
|
84
|
+
|
|
85
|
+
MTMD_API struct mtmd_context_params mtmd_context_params_default(void);
|
|
86
|
+
|
|
87
|
+
// initialize the mtmd context
|
|
88
|
+
// return nullptr on failure
|
|
89
|
+
MTMD_API mtmd_context * mtmd_init_from_file(const char * mmproj_fname,
|
|
90
|
+
const struct llama_model * text_model,
|
|
91
|
+
const struct mtmd_context_params ctx_params);
|
|
92
|
+
|
|
93
|
+
MTMD_API void mtmd_free(mtmd_context * ctx);
|
|
94
|
+
|
|
95
|
+
// whether we need to set non-causal mask before llama_decode
|
|
96
|
+
MTMD_API bool mtmd_decode_use_non_causal(mtmd_context * ctx);
|
|
97
|
+
|
|
98
|
+
// whether the current model use M-RoPE for llama_decode
|
|
99
|
+
MTMD_API bool mtmd_decode_use_mrope(mtmd_context * ctx);
|
|
100
|
+
|
|
101
|
+
|
|
102
|
+
// mtmd_bitmap
|
|
103
|
+
//
|
|
104
|
+
// length of data must be nx * ny * 3
|
|
105
|
+
// the data is in RGBRGBRGB... format
|
|
106
|
+
MTMD_API mtmd_bitmap * mtmd_bitmap_init (uint32_t nx,
|
|
107
|
+
uint32_t ny,
|
|
108
|
+
const unsigned char * data);
|
|
109
|
+
MTMD_API uint32_t mtmd_bitmap_get_nx (const mtmd_bitmap * bitmap);
|
|
110
|
+
MTMD_API uint32_t mtmd_bitmap_get_ny (const mtmd_bitmap * bitmap);
|
|
111
|
+
MTMD_API const unsigned char * mtmd_bitmap_get_data(const mtmd_bitmap * bitmap);
|
|
112
|
+
MTMD_API void mtmd_bitmap_free (mtmd_bitmap * bitmap);
|
|
113
|
+
// bitmap ID is optional, but useful for KV cache tracking
|
|
114
|
+
// these getters/setters are dedicated functions, so you can for example calculate the hash of the image based on mtmd_bitmap_get_data()
|
|
115
|
+
MTMD_API const char * mtmd_bitmap_get_id(const mtmd_bitmap * bitmap);
|
|
116
|
+
MTMD_API void mtmd_bitmap_set_id(mtmd_bitmap * bitmap, const char * id);
|
|
117
|
+
|
|
118
|
+
|
|
119
|
+
// mtmd_input_chunks
|
|
120
|
+
//
|
|
121
|
+
// this is simply a list of mtmd_input_chunk
|
|
122
|
+
// the elements can only be populated via mtmd_tokenize()
|
|
123
|
+
MTMD_API mtmd_input_chunks * mtmd_input_chunks_init(void);
|
|
124
|
+
MTMD_API size_t mtmd_input_chunks_size(const mtmd_input_chunks * chunks);
|
|
125
|
+
MTMD_API const mtmd_input_chunk * mtmd_input_chunks_get (const mtmd_input_chunks * chunks, size_t idx);
|
|
126
|
+
MTMD_API void mtmd_input_chunks_free(mtmd_input_chunks * chunks);
|
|
127
|
+
|
|
128
|
+
// mtmd_input_chunk
|
|
129
|
+
//
|
|
130
|
+
// the instance will be constructed via mtmd_tokenize()
|
|
131
|
+
// it will be freed along with mtmd_input_chunks
|
|
132
|
+
MTMD_API enum mtmd_input_chunk_type mtmd_input_chunk_get_type (const mtmd_input_chunk * chunk);
|
|
133
|
+
MTMD_API const llama_token * mtmd_input_chunk_get_tokens_text (const mtmd_input_chunk * chunk, size_t * n_tokens_output);
|
|
134
|
+
MTMD_API const mtmd_image_tokens * mtmd_input_chunk_get_tokens_image(const mtmd_input_chunk * chunk);
|
|
135
|
+
|
|
136
|
+
// in case you want to use custom logic to handle the chunk (i.e. KV cache management)
|
|
137
|
+
// you can move the chunk ownership to your own code by copying it
|
|
138
|
+
// remember to free the chunk when you are done with it
|
|
139
|
+
MTMD_API mtmd_input_chunk * mtmd_input_chunk_copy(const mtmd_input_chunk * chunk);
|
|
140
|
+
MTMD_API void mtmd_input_chunk_free(mtmd_input_chunk * chunk);
|
|
141
|
+
|
|
142
|
+
|
|
143
|
+
// mtmd_image_tokens
|
|
144
|
+
//
|
|
145
|
+
// the instance will be constructed via mtmd_tokenize()
|
|
146
|
+
// it will be freed along with mtmd_input_chunk
|
|
147
|
+
MTMD_API size_t mtmd_image_tokens_get_n_tokens(const mtmd_image_tokens * image_tokens);
|
|
148
|
+
MTMD_API size_t mtmd_image_tokens_get_nx (const mtmd_image_tokens * image_tokens);
|
|
149
|
+
MTMD_API size_t mtmd_image_tokens_get_ny (const mtmd_image_tokens * image_tokens);
|
|
150
|
+
MTMD_API const char * mtmd_image_tokens_get_id (const mtmd_image_tokens * image_tokens);
|
|
151
|
+
// number of temporal positions (always 1 for M-RoPE, n_tokens otherwise)
|
|
152
|
+
MTMD_API llama_pos mtmd_image_tokens_get_n_pos (const mtmd_image_tokens * image_tokens);
|
|
153
|
+
|
|
154
|
+
// tokenize an input text prompt and an image
|
|
155
|
+
// the prompt must have the input image marker (default: "<__image__>") in it
|
|
156
|
+
// the marker will be replaced with the image tokens
|
|
157
|
+
// for example:
|
|
158
|
+
// "here is an image: <__image__>\ndescribe it in detail."
|
|
159
|
+
// this will gives 3 chunks:
|
|
160
|
+
// 1. "here is an image: <start_of_image>"
|
|
161
|
+
// 2. (image tokens)
|
|
162
|
+
// 3. "<end_of_image>\ndescribe it in detail."
|
|
163
|
+
// number of bitmaps must be equal to the number of image markers in the prompt
|
|
164
|
+
// this function is thread-safe (shared ctx)
|
|
165
|
+
// return values:
|
|
166
|
+
// 0 on success
|
|
167
|
+
// 1 on number of images not matching the number of markers
|
|
168
|
+
// 2 on image preprocessing error
|
|
169
|
+
MTMD_API int32_t mtmd_tokenize(mtmd_context * ctx,
|
|
170
|
+
mtmd_input_chunks * output,
|
|
171
|
+
const mtmd_input_text * text,
|
|
172
|
+
const mtmd_bitmap ** bitmaps,
|
|
173
|
+
size_t n_bitmaps);
|
|
174
|
+
|
|
175
|
+
// returns 0 on success
|
|
176
|
+
MTMD_API int32_t mtmd_encode(mtmd_context * ctx,
|
|
177
|
+
const mtmd_image_tokens * image_tokens);
|
|
178
|
+
|
|
179
|
+
// get output embeddings from the last encode pass
|
|
180
|
+
MTMD_API float * mtmd_get_output_embd(mtmd_context * ctx);
|
|
181
|
+
|
|
182
|
+
/////////////////////////////////////////
|
|
183
|
+
|
|
184
|
+
//
|
|
185
|
+
// Helper functions (can be implemented based on other functions)
|
|
186
|
+
//
|
|
187
|
+
// Please note that these helpers are not guaranteed to be stable.
|
|
188
|
+
// BREAKING CHANGES are expected.
|
|
189
|
+
//
|
|
190
|
+
|
|
191
|
+
// helper function to construct a mtmd_bitmap from a file
|
|
192
|
+
// returns nullptr on failure
|
|
193
|
+
// this function is thread-safe
|
|
194
|
+
MTMD_API mtmd_bitmap * mtmd_helper_bitmap_init_from_file(const char * fname);
|
|
195
|
+
|
|
196
|
+
// helper function to construct a mtmd_bitmap from a buffer containing a file
|
|
197
|
+
// the file content must be an image in format supported by stb_image (jpg, png, bmp, gif, etc.)
|
|
198
|
+
// returns nullptr on failure
|
|
199
|
+
// this function is thread-safe
|
|
200
|
+
MTMD_API mtmd_bitmap * mtmd_helper_bitmap_init_from_buf(const unsigned char * buf, size_t len);
|
|
201
|
+
|
|
202
|
+
// helper to count the total number of tokens from a list of chunks, useful to keep track of KV cache
|
|
203
|
+
MTMD_API size_t mtmd_helper_get_n_tokens(const mtmd_input_chunks * chunks);
|
|
204
|
+
|
|
205
|
+
// helper to count the total position of tokens from a list of chunks, useful to keep track of n_past
|
|
206
|
+
// normally, n_pos is equal to n_tokens, but for M-RoPE it is different
|
|
207
|
+
MTMD_API llama_pos mtmd_helper_get_n_pos(const mtmd_input_chunks * chunks);
|
|
208
|
+
|
|
209
|
+
// helper function that automatically:
|
|
210
|
+
// 1. run llama_decode() on text chunks
|
|
211
|
+
// 2. run mtmd_encode() on image chunks, then mtmd_get_output_embd() and then llama_decode()
|
|
212
|
+
// if any of the mtmd_encode() or llama_decode() calls return non-zero, stop and forward the error
|
|
213
|
+
// otherwise, returns 0 on success
|
|
214
|
+
// this function is NOT thread-safe
|
|
215
|
+
MTMD_API int32_t mtmd_helper_eval_chunks(mtmd_context * ctx,
|
|
216
|
+
struct llama_context * lctx,
|
|
217
|
+
const mtmd_input_chunks * chunks,
|
|
218
|
+
llama_pos n_past,
|
|
219
|
+
llama_seq_id seq_id,
|
|
220
|
+
int32_t n_batch,
|
|
221
|
+
bool logits_last,
|
|
222
|
+
llama_pos * new_n_past);
|
|
223
|
+
|
|
224
|
+
// works like mtmd_helper_eval_chunks(), but only for a single chunk
|
|
225
|
+
// this function is NOT thread-safe
|
|
226
|
+
MTMD_API int32_t mtmd_helper_eval_chunk_single(mtmd_context * ctx,
|
|
227
|
+
struct llama_context * lctx,
|
|
228
|
+
const mtmd_input_chunk * chunk,
|
|
229
|
+
llama_pos n_past,
|
|
230
|
+
llama_seq_id seq_id,
|
|
231
|
+
int32_t n_batch,
|
|
232
|
+
bool logits_last,
|
|
233
|
+
llama_pos * new_n_past);
|
|
234
|
+
|
|
235
|
+
// helper function to decode an image whose embeddings have already been calculated
|
|
236
|
+
// this helper will handle batching and pre/post decoding setup (for ex. gemma 3 requires non-causal attention)
|
|
237
|
+
// ret 0 on success, -1 on chunk not being a valid image chunk, 1 on decode failure
|
|
238
|
+
MTMD_API int32_t mtmd_helper_decode_image_chunk(mtmd_context * ctx,
|
|
239
|
+
struct llama_context * lctx,
|
|
240
|
+
const mtmd_input_chunk * chunk,
|
|
241
|
+
float * encoded_embd,
|
|
242
|
+
llama_pos n_past,
|
|
243
|
+
llama_seq_id seq_id,
|
|
244
|
+
int32_t n_batch,
|
|
245
|
+
llama_pos * new_n_past);
|
|
246
|
+
|
|
247
|
+
/////////////////////////////////////////
|
|
248
|
+
|
|
249
|
+
// test function, to be used in test-mtmd-c-api.c
|
|
250
|
+
MTMD_API mtmd_input_chunks * mtmd_test_create_input_chunks(void);
|
|
251
|
+
|
|
252
|
+
#ifdef __cplusplus
|
|
253
|
+
} // extern "C"
|
|
254
|
+
#endif
|
|
255
|
+
|
|
256
|
+
//
|
|
257
|
+
// C++ wrappers
|
|
258
|
+
//
|
|
259
|
+
|
|
260
|
+
#ifdef __cplusplus
|
|
261
|
+
|
|
262
|
+
namespace mtmd {
|
|
263
|
+
|
|
264
|
+
struct mtmd_context_deleter {
|
|
265
|
+
void operator()(mtmd_context * val) { mtmd_free(val); }
|
|
266
|
+
};
|
|
267
|
+
using context_ptr = std::unique_ptr<mtmd_context, mtmd_context_deleter>;
|
|
268
|
+
|
|
269
|
+
struct mtmd_bitmap_deleter {
|
|
270
|
+
void operator()(mtmd_bitmap * val) { mtmd_bitmap_free(val); }
|
|
271
|
+
};
|
|
272
|
+
using bitmap_ptr = std::unique_ptr<mtmd_bitmap, mtmd_bitmap_deleter>;
|
|
273
|
+
|
|
274
|
+
struct mtmd_input_chunks_deleter {
|
|
275
|
+
void operator()(mtmd_input_chunks * val) { mtmd_input_chunks_free(val); }
|
|
276
|
+
};
|
|
277
|
+
using input_chunks_ptr = std::unique_ptr<mtmd_input_chunks, mtmd_input_chunks_deleter>;
|
|
278
|
+
|
|
279
|
+
struct mtmd_input_chunk_deleter {
|
|
280
|
+
void operator()(mtmd_input_chunk * val) { mtmd_input_chunk_free(val); }
|
|
281
|
+
};
|
|
282
|
+
using input_chunk_ptr = std::unique_ptr<mtmd_input_chunk, mtmd_input_chunk_deleter>;
|
|
283
|
+
|
|
284
|
+
struct bitmap {
|
|
285
|
+
bitmap_ptr ptr;
|
|
286
|
+
bitmap() : ptr(nullptr) {}
|
|
287
|
+
bitmap(mtmd_bitmap * bitmap) : ptr(bitmap) {}
|
|
288
|
+
bitmap(bitmap && other) noexcept : ptr(std::move(other.ptr)) {}
|
|
289
|
+
bitmap(uint32_t nx, uint32_t ny, const unsigned char * data) {
|
|
290
|
+
ptr.reset(mtmd_bitmap_init(nx, ny, data));
|
|
291
|
+
}
|
|
292
|
+
~bitmap() = default;
|
|
293
|
+
uint32_t nx() { return mtmd_bitmap_get_nx(ptr.get()); }
|
|
294
|
+
uint32_t ny() { return mtmd_bitmap_get_ny(ptr.get()); }
|
|
295
|
+
const unsigned char * data() { return mtmd_bitmap_get_data(ptr.get()); }
|
|
296
|
+
std::string id() { return mtmd_bitmap_get_id(ptr.get()); }
|
|
297
|
+
void set_id(const char * id) { mtmd_bitmap_set_id(ptr.get(), id); }
|
|
298
|
+
};
|
|
299
|
+
|
|
300
|
+
struct bitmaps {
|
|
301
|
+
std::vector<bitmap> entries;
|
|
302
|
+
~bitmaps() = default;
|
|
303
|
+
// return list of pointers to mtmd_bitmap
|
|
304
|
+
// example:
|
|
305
|
+
// auto bitmaps_c_ptr = bitmaps.c_ptr();
|
|
306
|
+
// int32_t res = mtmd_tokenize(... bitmaps_c_ptr.data(), bitmaps_c_ptr.size());
|
|
307
|
+
std::vector<const mtmd_bitmap *> c_ptr() {
|
|
308
|
+
std::vector<const mtmd_bitmap *> res(entries.size());
|
|
309
|
+
for (size_t i = 0; i < entries.size(); i++) {
|
|
310
|
+
res[i] = entries[i].ptr.get();
|
|
311
|
+
}
|
|
312
|
+
return res;
|
|
313
|
+
}
|
|
314
|
+
};
|
|
315
|
+
|
|
316
|
+
struct input_chunks {
|
|
317
|
+
input_chunks_ptr ptr;
|
|
318
|
+
input_chunks() = default;
|
|
319
|
+
input_chunks(mtmd_input_chunks * chunks) : ptr(chunks) {}
|
|
320
|
+
~input_chunks() = default;
|
|
321
|
+
size_t size() { return mtmd_input_chunks_size(ptr.get()); }
|
|
322
|
+
const mtmd_input_chunk * operator[](size_t idx) {
|
|
323
|
+
return mtmd_input_chunks_get(ptr.get(), idx);
|
|
324
|
+
}
|
|
325
|
+
};
|
|
326
|
+
|
|
327
|
+
} // namespace mtmd
|
|
328
|
+
|
|
329
|
+
#endif
|
|
330
|
+
|
|
331
|
+
#endif
|
|
@@ -851,7 +851,7 @@ static void hellaswag_score(llama_context * ctx, const common_params & params) {
|
|
|
851
851
|
|
|
852
852
|
LOG_INF("%s : calculating hellaswag score over selected tasks.\n", __func__);
|
|
853
853
|
|
|
854
|
-
LOG("\ntask\tacc_norm\n");
|
|
854
|
+
LOG("\ntask\tacc_norm\t95%% confidence interval\n");
|
|
855
855
|
|
|
856
856
|
double acc = 0.0f;
|
|
857
857
|
|
|
@@ -985,8 +985,22 @@ static void hellaswag_score(llama_context * ctx, const common_params & params) {
|
|
|
985
985
|
acc += 1.0;
|
|
986
986
|
}
|
|
987
987
|
|
|
988
|
-
|
|
989
|
-
|
|
988
|
+
double freq = acc / double(i + 1);
|
|
989
|
+
|
|
990
|
+
const double za = 1.95996398454;
|
|
991
|
+
|
|
992
|
+
// // Wald normal approx
|
|
993
|
+
// double conf =za*sqrt(freq*(1-freq)/double(i + 1));
|
|
994
|
+
// LOG("%zu\t%.8lf +/- %.8lf\n", i + 1, freq*100.0, conf*100.0);
|
|
995
|
+
|
|
996
|
+
// Wilson score interval, more accurate
|
|
997
|
+
double z = za * za / double(i + 1);
|
|
998
|
+
double cnf = z * sqrt(double(i + 1) * (4.0 * freq * (1 - freq) + z)) / (za + za);
|
|
999
|
+
double a = (freq + z * 0.5 - cnf) / (1.0 + z);
|
|
1000
|
+
double b = (freq + z * 0.5 + cnf) / (1.0 + z);
|
|
1001
|
+
|
|
1002
|
+
// Print the accumulated accuracy mean x 100 and confidence interval
|
|
1003
|
+
LOG("%zu\t%3.8lf%%\t[%3.4lf%%, %3.4lf%%]\n", i + 1, freq * 100.0, a * 100.0, b * 100.0);
|
|
990
1004
|
}
|
|
991
1005
|
|
|
992
1006
|
i0 = i1 - 1;
|
|
@@ -1540,7 +1554,10 @@ static void multiple_choice_score(llama_context * ctx, const common_params & par
|
|
|
1540
1554
|
if (int(batch_indeces.size()) != num_answers) {
|
|
1541
1555
|
batch_indeces.resize(num_answers);
|
|
1542
1556
|
}
|
|
1543
|
-
|
|
1557
|
+
|
|
1558
|
+
for (int s = 0; s < num_answers; ++s) {
|
|
1559
|
+
batch_indeces[s] = s0 + s;
|
|
1560
|
+
}
|
|
1544
1561
|
|
|
1545
1562
|
for (size_t i = 0; i < cur_task.common_prefix; ++i) {
|
|
1546
1563
|
//llama_batch_add(batch, cur_task.seq_tokens[0][i], i, { s0 + 0, s0 + 1, s0 + 2, s0 + 3}, false);
|
|
@@ -1956,7 +1973,6 @@ int main(int argc, char ** argv) {
|
|
|
1956
1973
|
common_params params;
|
|
1957
1974
|
|
|
1958
1975
|
params.n_ctx = 512;
|
|
1959
|
-
params.logits_all = true;
|
|
1960
1976
|
params.escape = false;
|
|
1961
1977
|
|
|
1962
1978
|
if (!common_params_parse(argc, argv, params, LLAMA_EXAMPLE_PERPLEXITY)) {
|
|
@@ -9,6 +9,7 @@
|
|
|
9
9
|
#include <fstream>
|
|
10
10
|
#include <cmath>
|
|
11
11
|
#include <cctype>
|
|
12
|
+
#include <algorithm>
|
|
12
13
|
|
|
13
14
|
struct quant_option {
|
|
14
15
|
std::string name;
|
|
@@ -16,7 +17,7 @@ struct quant_option {
|
|
|
16
17
|
std::string desc;
|
|
17
18
|
};
|
|
18
19
|
|
|
19
|
-
static const std::vector<
|
|
20
|
+
static const std::vector<quant_option> QUANT_OPTIONS = {
|
|
20
21
|
{ "Q4_0", LLAMA_FTYPE_MOSTLY_Q4_0, " 4.34G, +0.4685 ppl @ Llama-3-8B", },
|
|
21
22
|
{ "Q4_1", LLAMA_FTYPE_MOSTLY_Q4_1, " 4.78G, +0.4511 ppl @ Llama-3-8B", },
|
|
22
23
|
{ "Q5_0", LLAMA_FTYPE_MOSTLY_Q5_0, " 5.21G, +0.1316 ppl @ Llama-3-8B", },
|
|
@@ -56,6 +57,12 @@ static const std::vector<struct quant_option> QUANT_OPTIONS = {
|
|
|
56
57
|
{ "COPY", LLAMA_FTYPE_ALL_F32, "only copy tensors, no quantizing", },
|
|
57
58
|
};
|
|
58
59
|
|
|
60
|
+
// Quantization types. Changes to this struct must be replicated in llama-quantize.cpp
// Per-tensor quantization override parsed from `--tensor-type NAME=TYPE`:
// tensors matching `name` are quantized to `quant` instead of the global ftype.
struct tensor_quantization {
    std::string name;                  // tensor name to match (lowercased by the parser)
    ggml_type quant = GGML_TYPE_COUNT; // GGML_TYPE_COUNT doubles as the "unset/invalid" sentinel
};
|
|
65
|
+
|
|
59
66
|
static const char * const LLM_KV_QUANTIZE_IMATRIX_FILE = "quantize.imatrix.file";
|
|
60
67
|
static const char * const LLM_KV_QUANTIZE_IMATRIX_DATASET = "quantize.imatrix.dataset";
|
|
61
68
|
static const char * const LLM_KV_QUANTIZE_IMATRIX_N_ENTRIES = "quantize.imatrix.entries_count";
|
|
@@ -105,7 +112,8 @@ static bool try_parse_ftype(const std::string & ftype_str_in, llama_ftype & ftyp
|
|
|
105
112
|
//
|
|
106
113
|
[[noreturn]]
|
|
107
114
|
static void usage(const char * executable) {
|
|
108
|
-
printf("usage: %s [--help] [--allow-requantize] [--leave-output-tensor] [--pure] [--imatrix] [--include-weights] [--exclude-weights] [--output-tensor-type]
|
|
115
|
+
printf("usage: %s [--help] [--allow-requantize] [--leave-output-tensor] [--pure] [--imatrix] [--include-weights] [--exclude-weights] [--output-tensor-type]\n", executable);
|
|
116
|
+
printf(" [--token-embedding-type] [--tensor-type] [--keep-split] [--override-kv] model-f32.gguf [model-quant.gguf] type [nthreads]\n\n");
|
|
109
117
|
printf(" --allow-requantize: Allows requantizing tensors that have already been quantized. Warning: This can severely reduce quality compared to quantizing from 16bit or 32bit\n");
|
|
110
118
|
printf(" --leave-output-tensor: Will leave output.weight un(re)quantized. Increases model size but may also increase quality, especially when requantizing\n");
|
|
111
119
|
printf(" --pure: Disable k-quant mixtures and quantize all tensors to the same type\n");
|
|
@@ -114,6 +122,8 @@ static void usage(const char * executable) {
|
|
|
114
122
|
printf(" --exclude-weights tensor_name: use importance matrix for this/these tensor(s)\n");
|
|
115
123
|
printf(" --output-tensor-type ggml_type: use this ggml_type for the output.weight tensor\n");
|
|
116
124
|
printf(" --token-embedding-type ggml_type: use this ggml_type for the token embeddings tensor\n");
|
|
125
|
+
printf(" --tensor-type TENSOR=TYPE: quantize this tensor to this ggml_type. example: --tensor-type attn_q=q8_0\n");
|
|
126
|
+
printf(" Advanced option to selectively quantize tensors. May be specified multiple times.\n");
|
|
117
127
|
printf(" --keep-split: will generate quantized model in the same shards as input\n");
|
|
118
128
|
printf(" --override-kv KEY=TYPE:VALUE\n");
|
|
119
129
|
printf(" Advanced option to override model metadata by key in the quantized model. May be specified multiple times.\n");
|
|
@@ -240,10 +250,42 @@ static ggml_type parse_ggml_type(const char * arg) {
|
|
|
240
250
|
return type;
|
|
241
251
|
}
|
|
242
252
|
}
|
|
243
|
-
fprintf(stderr, "%s: invalid ggml_type '%s'\n", __func__, arg);
|
|
253
|
+
fprintf(stderr, "\n%s: invalid ggml_type '%s'\n\n", __func__, arg);
|
|
244
254
|
return GGML_TYPE_COUNT;
|
|
245
255
|
}
|
|
246
256
|
|
|
257
|
+
static bool parse_tensor_type(const char * data, std::vector<tensor_quantization> & tensor_type) {
|
|
258
|
+
const char * sep = strchr(data, '=');
|
|
259
|
+
if (sep == nullptr) {
|
|
260
|
+
printf("\n%s: malformed tensor type '%s'\n\n", __func__, data);
|
|
261
|
+
return false;
|
|
262
|
+
}
|
|
263
|
+
|
|
264
|
+
const size_t tn_len = sep - data;
|
|
265
|
+
if (tn_len == 0) {
|
|
266
|
+
printf("\n%s: missing tensor name\n\n", __func__);
|
|
267
|
+
return false;
|
|
268
|
+
}
|
|
269
|
+
if (const size_t qt_len = strlen(sep); qt_len == 1) {
|
|
270
|
+
printf("\n%s: missing quantization type\n\n", __func__);
|
|
271
|
+
return false;
|
|
272
|
+
}
|
|
273
|
+
|
|
274
|
+
std::string tn(data, tn_len);
|
|
275
|
+
std::transform(tn.begin(), tn.end(), tn.begin(), tolower);
|
|
276
|
+
sep++;
|
|
277
|
+
tensor_quantization tqz;
|
|
278
|
+
tqz.name = tn;
|
|
279
|
+
tqz.quant = parse_ggml_type(sep);
|
|
280
|
+
tensor_type.emplace_back(std::move(tqz));
|
|
281
|
+
if (tqz.quant == GGML_TYPE_COUNT) {
|
|
282
|
+
printf("\n%s: invalid quantization type '%s'\n\n", __func__, sep);
|
|
283
|
+
return false;
|
|
284
|
+
}
|
|
285
|
+
|
|
286
|
+
return true;
|
|
287
|
+
}
|
|
288
|
+
|
|
247
289
|
int main(int argc, char ** argv) {
|
|
248
290
|
if (argc < 3) {
|
|
249
291
|
usage(argv[0]);
|
|
@@ -255,6 +297,7 @@ int main(int argc, char ** argv) {
|
|
|
255
297
|
std::string imatrix_file;
|
|
256
298
|
std::vector<std::string> included_weights, excluded_weights;
|
|
257
299
|
std::vector<llama_model_kv_override> kv_overrides;
|
|
300
|
+
std::vector<tensor_quantization> tensor_types;
|
|
258
301
|
|
|
259
302
|
for (; arg_idx < argc && strncmp(argv[arg_idx], "--", 2) == 0; arg_idx++) {
|
|
260
303
|
if (strcmp(argv[arg_idx], "--leave-output-tensor") == 0) {
|
|
@@ -277,6 +320,10 @@ int main(int argc, char ** argv) {
|
|
|
277
320
|
} else {
|
|
278
321
|
usage(argv[0]);
|
|
279
322
|
}
|
|
323
|
+
} else if (strcmp(argv[arg_idx], "--tensor-type") == 0) {
|
|
324
|
+
if (arg_idx == argc-1 || !parse_tensor_type(argv[++arg_idx], tensor_types)) {
|
|
325
|
+
usage(argv[0]);
|
|
326
|
+
}
|
|
280
327
|
} else if (strcmp(argv[arg_idx], "--override-kv") == 0) {
|
|
281
328
|
if (arg_idx == argc-1 || !string_parse_kv_override(argv[++arg_idx], kv_overrides)) {
|
|
282
329
|
usage(argv[0]);
|
|
@@ -361,6 +408,9 @@ int main(int argc, char ** argv) {
|
|
|
361
408
|
kv_overrides.back().key[0] = 0;
|
|
362
409
|
params.kv_overrides = &kv_overrides;
|
|
363
410
|
}
|
|
411
|
+
if (!tensor_types.empty()) {
|
|
412
|
+
params.tensor_types = &tensor_types;
|
|
413
|
+
}
|
|
364
414
|
|
|
365
415
|
llama_backend_init();
|
|
366
416
|
|