@fugood/llama.node 0.6.3 → 1.0.0-beta.2
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/CMakeLists.txt +40 -30
- package/README.md +4 -1
- package/lib/binding.js +41 -29
- package/lib/binding.ts +26 -25
- package/package.json +40 -7
- package/scripts/build.js +47 -0
- package/scripts/llama.cpp.patch +109 -0
- package/src/anyascii.c +22223 -0
- package/src/anyascii.h +42 -0
- package/src/tts_utils.cpp +20 -7
- package/src/tts_utils.h +2 -0
- package/bin/darwin/arm64/llama-node.node +0 -0
- package/bin/darwin/x64/llama-node.node +0 -0
- package/bin/linux/arm64/llama-node.node +0 -0
- package/bin/linux/x64/llama-node.node +0 -0
- package/bin/linux-cuda/arm64/llama-node.node +0 -0
- package/bin/linux-cuda/x64/llama-node.node +0 -0
- package/bin/linux-vulkan/arm64/llama-node.node +0 -0
- package/bin/linux-vulkan/x64/llama-node.node +0 -0
- package/bin/win32/x64/llama-node.node +0 -0
- package/bin/win32/x64/node.lib +0 -0
- package/bin/win32-vulkan/arm64/llama-node.node +0 -0
- package/bin/win32-vulkan/arm64/node.lib +0 -0
- package/bin/win32-vulkan/x64/llama-node.node +0 -0
- package/bin/win32-vulkan/x64/node.lib +0 -0
- package/src/llama.cpp/.github/workflows/build-linux-cross.yml +0 -233
- package/src/llama.cpp/.github/workflows/build.yml +0 -1078
- package/src/llama.cpp/.github/workflows/close-issue.yml +0 -28
- package/src/llama.cpp/.github/workflows/docker.yml +0 -178
- package/src/llama.cpp/.github/workflows/editorconfig.yml +0 -29
- package/src/llama.cpp/.github/workflows/gguf-publish.yml +0 -44
- package/src/llama.cpp/.github/workflows/labeler.yml +0 -17
- package/src/llama.cpp/.github/workflows/python-check-requirements.yml +0 -33
- package/src/llama.cpp/.github/workflows/python-lint.yml +0 -30
- package/src/llama.cpp/.github/workflows/python-type-check.yml +0 -40
- package/src/llama.cpp/.github/workflows/release.yml +0 -739
- package/src/llama.cpp/.github/workflows/server.yml +0 -237
- package/src/llama.cpp/.github/workflows/winget.yml +0 -42
- package/src/llama.cpp/cmake/arm64-apple-clang.cmake +0 -16
- package/src/llama.cpp/cmake/arm64-windows-llvm.cmake +0 -16
- package/src/llama.cpp/cmake/build-info.cmake +0 -64
- package/src/llama.cpp/cmake/common.cmake +0 -35
- package/src/llama.cpp/cmake/git-vars.cmake +0 -22
- package/src/llama.cpp/cmake/x64-windows-llvm.cmake +0 -5
- package/src/llama.cpp/common/build-info.cpp.in +0 -4
- package/src/llama.cpp/docs/build.md +0 -561
- package/src/llama.cpp/examples/CMakeLists.txt +0 -43
- package/src/llama.cpp/examples/batched/CMakeLists.txt +0 -5
- package/src/llama.cpp/examples/batched/batched.cpp +0 -246
- package/src/llama.cpp/examples/chat-13B.bat +0 -57
- package/src/llama.cpp/examples/convert-llama2c-to-ggml/CMakeLists.txt +0 -5
- package/src/llama.cpp/examples/convert-llama2c-to-ggml/convert-llama2c-to-ggml.cpp +0 -941
- package/src/llama.cpp/examples/deprecation-warning/deprecation-warning.cpp +0 -35
- package/src/llama.cpp/examples/embedding/CMakeLists.txt +0 -5
- package/src/llama.cpp/examples/embedding/embedding.cpp +0 -323
- package/src/llama.cpp/examples/eval-callback/CMakeLists.txt +0 -10
- package/src/llama.cpp/examples/eval-callback/eval-callback.cpp +0 -194
- package/src/llama.cpp/examples/gen-docs/CMakeLists.txt +0 -5
- package/src/llama.cpp/examples/gen-docs/gen-docs.cpp +0 -83
- package/src/llama.cpp/examples/gguf/CMakeLists.txt +0 -5
- package/src/llama.cpp/examples/gguf/gguf.cpp +0 -265
- package/src/llama.cpp/examples/gguf-hash/CMakeLists.txt +0 -22
- package/src/llama.cpp/examples/gguf-hash/deps/rotate-bits/rotate-bits.h +0 -46
- package/src/llama.cpp/examples/gguf-hash/deps/sha1/sha1.c +0 -295
- package/src/llama.cpp/examples/gguf-hash/deps/sha1/sha1.h +0 -52
- package/src/llama.cpp/examples/gguf-hash/deps/sha256/sha256.c +0 -221
- package/src/llama.cpp/examples/gguf-hash/deps/sha256/sha256.h +0 -24
- package/src/llama.cpp/examples/gguf-hash/deps/xxhash/xxhash.c +0 -42
- package/src/llama.cpp/examples/gguf-hash/deps/xxhash/xxhash.h +0 -7093
- package/src/llama.cpp/examples/gguf-hash/gguf-hash.cpp +0 -694
- package/src/llama.cpp/examples/gritlm/CMakeLists.txt +0 -5
- package/src/llama.cpp/examples/gritlm/gritlm.cpp +0 -229
- package/src/llama.cpp/examples/jeopardy/questions.txt +0 -100
- package/src/llama.cpp/examples/llama.android/app/build.gradle.kts +0 -65
- package/src/llama.cpp/examples/llama.android/build.gradle.kts +0 -6
- package/src/llama.cpp/examples/llama.android/llama/build.gradle.kts +0 -71
- package/src/llama.cpp/examples/llama.android/llama/src/main/cpp/CMakeLists.txt +0 -53
- package/src/llama.cpp/examples/llama.android/llama/src/main/cpp/llama-android.cpp +0 -452
- package/src/llama.cpp/examples/llama.android/settings.gradle.kts +0 -18
- package/src/llama.cpp/examples/lookahead/CMakeLists.txt +0 -5
- package/src/llama.cpp/examples/lookahead/lookahead.cpp +0 -472
- package/src/llama.cpp/examples/lookup/CMakeLists.txt +0 -23
- package/src/llama.cpp/examples/lookup/lookup-create.cpp +0 -40
- package/src/llama.cpp/examples/lookup/lookup-merge.cpp +0 -47
- package/src/llama.cpp/examples/lookup/lookup-stats.cpp +0 -157
- package/src/llama.cpp/examples/lookup/lookup.cpp +0 -242
- package/src/llama.cpp/examples/parallel/CMakeLists.txt +0 -5
- package/src/llama.cpp/examples/parallel/parallel.cpp +0 -492
- package/src/llama.cpp/examples/passkey/CMakeLists.txt +0 -5
- package/src/llama.cpp/examples/passkey/passkey.cpp +0 -277
- package/src/llama.cpp/examples/retrieval/CMakeLists.txt +0 -5
- package/src/llama.cpp/examples/retrieval/retrieval.cpp +0 -304
- package/src/llama.cpp/examples/save-load-state/CMakeLists.txt +0 -5
- package/src/llama.cpp/examples/save-load-state/save-load-state.cpp +0 -246
- package/src/llama.cpp/examples/simple/CMakeLists.txt +0 -5
- package/src/llama.cpp/examples/simple/simple.cpp +0 -206
- package/src/llama.cpp/examples/simple-chat/CMakeLists.txt +0 -5
- package/src/llama.cpp/examples/simple-chat/simple-chat.cpp +0 -206
- package/src/llama.cpp/examples/simple-cmake-pkg/CMakeLists.txt +0 -11
- package/src/llama.cpp/examples/speculative/CMakeLists.txt +0 -5
- package/src/llama.cpp/examples/speculative/speculative.cpp +0 -644
- package/src/llama.cpp/examples/speculative-simple/CMakeLists.txt +0 -5
- package/src/llama.cpp/examples/speculative-simple/speculative-simple.cpp +0 -261
- package/src/llama.cpp/examples/sycl/CMakeLists.txt +0 -9
- package/src/llama.cpp/examples/sycl/build.sh +0 -23
- package/src/llama.cpp/examples/sycl/ls-sycl-device.cpp +0 -13
- package/src/llama.cpp/examples/sycl/run-llama2.sh +0 -27
- package/src/llama.cpp/examples/sycl/run-llama3.sh +0 -28
- package/src/llama.cpp/examples/sycl/win-build-sycl.bat +0 -33
- package/src/llama.cpp/examples/sycl/win-run-llama2.bat +0 -9
- package/src/llama.cpp/examples/sycl/win-run-llama3.bat +0 -9
- package/src/llama.cpp/examples/training/CMakeLists.txt +0 -5
- package/src/llama.cpp/examples/training/finetune.cpp +0 -96
- package/src/llama.cpp/ggml/cmake/GitVars.cmake +0 -22
- package/src/llama.cpp/ggml/cmake/common.cmake +0 -26
- package/src/llama.cpp/ggml/src/ggml-alloc.c +0 -1042
- package/src/llama.cpp/ggml/src/ggml-backend-impl.h +0 -255
- package/src/llama.cpp/ggml/src/ggml-backend-reg.cpp +0 -586
- package/src/llama.cpp/ggml/src/ggml-backend.cpp +0 -2008
- package/src/llama.cpp/ggml/src/ggml-blas/CMakeLists.txt +0 -87
- package/src/llama.cpp/ggml/src/ggml-blas/ggml-blas.cpp +0 -517
- package/src/llama.cpp/ggml/src/ggml-cann/CMakeLists.txt +0 -74
- package/src/llama.cpp/ggml/src/ggml-cann/acl_tensor.cpp +0 -179
- package/src/llama.cpp/ggml/src/ggml-cann/acl_tensor.h +0 -258
- package/src/llama.cpp/ggml/src/ggml-cann/aclnn_ops.cpp +0 -2863
- package/src/llama.cpp/ggml/src/ggml-cann/aclnn_ops.h +0 -1110
- package/src/llama.cpp/ggml/src/ggml-cann/common.h +0 -420
- package/src/llama.cpp/ggml/src/ggml-cann/ggml-cann.cpp +0 -2570
- package/src/llama.cpp/ggml/src/ggml-common.h +0 -1857
- package/src/llama.cpp/ggml/src/ggml-cpu/cmake/FindSIMD.cmake +0 -100
- package/src/llama.cpp/ggml/src/ggml-cuda/CMakeLists.txt +0 -184
- package/src/llama.cpp/ggml/src/ggml-cuda/vendors/cuda.h +0 -15
- package/src/llama.cpp/ggml/src/ggml-cuda/vendors/hip.h +0 -243
- package/src/llama.cpp/ggml/src/ggml-cuda/vendors/musa.h +0 -140
- package/src/llama.cpp/ggml/src/ggml-hip/CMakeLists.txt +0 -131
- package/src/llama.cpp/ggml/src/ggml-impl.h +0 -601
- package/src/llama.cpp/ggml/src/ggml-kompute/CMakeLists.txt +0 -166
- package/src/llama.cpp/ggml/src/ggml-kompute/ggml-kompute.cpp +0 -2251
- package/src/llama.cpp/ggml/src/ggml-metal/CMakeLists.txt +0 -120
- package/src/llama.cpp/ggml/src/ggml-metal/ggml-metal-impl.h +0 -622
- package/src/llama.cpp/ggml/src/ggml-musa/CMakeLists.txt +0 -113
- package/src/llama.cpp/ggml/src/ggml-opencl/CMakeLists.txt +0 -96
- package/src/llama.cpp/ggml/src/ggml-opencl/ggml-opencl.cpp +0 -5124
- package/src/llama.cpp/ggml/src/ggml-opt.cpp +0 -1037
- package/src/llama.cpp/ggml/src/ggml-quants.c +0 -5232
- package/src/llama.cpp/ggml/src/ggml-quants.h +0 -100
- package/src/llama.cpp/ggml/src/ggml-rpc/CMakeLists.txt +0 -9
- package/src/llama.cpp/ggml/src/ggml-rpc/ggml-rpc.cpp +0 -1813
- package/src/llama.cpp/ggml/src/ggml-sycl/CMakeLists.txt +0 -189
- package/src/llama.cpp/ggml/src/ggml-sycl/backend.hpp +0 -37
- package/src/llama.cpp/ggml/src/ggml-sycl/binbcast.cpp +0 -239
- package/src/llama.cpp/ggml/src/ggml-sycl/binbcast.hpp +0 -39
- package/src/llama.cpp/ggml/src/ggml-sycl/common.cpp +0 -83
- package/src/llama.cpp/ggml/src/ggml-sycl/common.hpp +0 -493
- package/src/llama.cpp/ggml/src/ggml-sycl/concat.cpp +0 -197
- package/src/llama.cpp/ggml/src/ggml-sycl/concat.hpp +0 -20
- package/src/llama.cpp/ggml/src/ggml-sycl/conv.cpp +0 -100
- package/src/llama.cpp/ggml/src/ggml-sycl/conv.hpp +0 -20
- package/src/llama.cpp/ggml/src/ggml-sycl/convert.cpp +0 -623
- package/src/llama.cpp/ggml/src/ggml-sycl/convert.hpp +0 -34
- package/src/llama.cpp/ggml/src/ggml-sycl/cpy.cpp +0 -701
- package/src/llama.cpp/ggml/src/ggml-sycl/cpy.hpp +0 -11
- package/src/llama.cpp/ggml/src/ggml-sycl/dequantize.hpp +0 -791
- package/src/llama.cpp/ggml/src/ggml-sycl/dmmv.cpp +0 -1160
- package/src/llama.cpp/ggml/src/ggml-sycl/dmmv.hpp +0 -27
- package/src/llama.cpp/ggml/src/ggml-sycl/dpct/helper.hpp +0 -2957
- package/src/llama.cpp/ggml/src/ggml-sycl/element_wise.cpp +0 -1536
- package/src/llama.cpp/ggml/src/ggml-sycl/element_wise.hpp +0 -75
- package/src/llama.cpp/ggml/src/ggml-sycl/gemm.hpp +0 -99
- package/src/llama.cpp/ggml/src/ggml-sycl/getrows.cpp +0 -311
- package/src/llama.cpp/ggml/src/ggml-sycl/getrows.hpp +0 -20
- package/src/llama.cpp/ggml/src/ggml-sycl/ggml-sycl.cpp +0 -4443
- package/src/llama.cpp/ggml/src/ggml-sycl/gla.cpp +0 -105
- package/src/llama.cpp/ggml/src/ggml-sycl/gla.hpp +0 -8
- package/src/llama.cpp/ggml/src/ggml-sycl/im2col.cpp +0 -136
- package/src/llama.cpp/ggml/src/ggml-sycl/im2col.hpp +0 -21
- package/src/llama.cpp/ggml/src/ggml-sycl/mmq.cpp +0 -3030
- package/src/llama.cpp/ggml/src/ggml-sycl/mmq.hpp +0 -33
- package/src/llama.cpp/ggml/src/ggml-sycl/mmvq.cpp +0 -1108
- package/src/llama.cpp/ggml/src/ggml-sycl/mmvq.hpp +0 -27
- package/src/llama.cpp/ggml/src/ggml-sycl/norm.cpp +0 -474
- package/src/llama.cpp/ggml/src/ggml-sycl/norm.hpp +0 -26
- package/src/llama.cpp/ggml/src/ggml-sycl/outprod.cpp +0 -46
- package/src/llama.cpp/ggml/src/ggml-sycl/outprod.hpp +0 -10
- package/src/llama.cpp/ggml/src/ggml-sycl/presets.hpp +0 -74
- package/src/llama.cpp/ggml/src/ggml-sycl/quants.hpp +0 -83
- package/src/llama.cpp/ggml/src/ggml-sycl/rope.cpp +0 -362
- package/src/llama.cpp/ggml/src/ggml-sycl/rope.hpp +0 -20
- package/src/llama.cpp/ggml/src/ggml-sycl/softmax.cpp +0 -264
- package/src/llama.cpp/ggml/src/ggml-sycl/softmax.hpp +0 -20
- package/src/llama.cpp/ggml/src/ggml-sycl/sycl_hw.cpp +0 -13
- package/src/llama.cpp/ggml/src/ggml-sycl/sycl_hw.hpp +0 -23
- package/src/llama.cpp/ggml/src/ggml-sycl/tsembd.cpp +0 -73
- package/src/llama.cpp/ggml/src/ggml-sycl/tsembd.hpp +0 -20
- package/src/llama.cpp/ggml/src/ggml-sycl/vecdotq.hpp +0 -1215
- package/src/llama.cpp/ggml/src/ggml-sycl/wkv.cpp +0 -305
- package/src/llama.cpp/ggml/src/ggml-sycl/wkv.hpp +0 -10
- package/src/llama.cpp/ggml/src/ggml-threading.cpp +0 -12
- package/src/llama.cpp/ggml/src/ggml-threading.h +0 -14
- package/src/llama.cpp/ggml/src/ggml-vulkan/CMakeLists.txt +0 -196
- package/src/llama.cpp/ggml/src/ggml-vulkan/ggml-vulkan.cpp +0 -10699
- package/src/llama.cpp/ggml/src/ggml-vulkan/vulkan-shaders/CMakeLists.txt +0 -39
- package/src/llama.cpp/ggml/src/ggml-vulkan/vulkan-shaders/vulkan-shaders-gen.cpp +0 -751
- package/src/llama.cpp/ggml/src/ggml.c +0 -6550
- package/src/llama.cpp/ggml/src/gguf.cpp +0 -1330
- package/src/llama.cpp/models/.editorconfig +0 -1
- package/src/llama.cpp/models/ggml-vocab-aquila.gguf +0 -0
- package/src/llama.cpp/models/ggml-vocab-baichuan.gguf +0 -0
- package/src/llama.cpp/models/ggml-vocab-bert-bge.gguf +0 -0
- package/src/llama.cpp/models/ggml-vocab-bert-bge.gguf.inp +0 -112
- package/src/llama.cpp/models/ggml-vocab-bert-bge.gguf.out +0 -46
- package/src/llama.cpp/models/ggml-vocab-chameleon.gguf.inp +0 -112
- package/src/llama.cpp/models/ggml-vocab-chameleon.gguf.out +0 -46
- package/src/llama.cpp/models/ggml-vocab-command-r.gguf +0 -0
- package/src/llama.cpp/models/ggml-vocab-command-r.gguf.inp +0 -112
- package/src/llama.cpp/models/ggml-vocab-command-r.gguf.out +0 -46
- package/src/llama.cpp/models/ggml-vocab-deepseek-coder.gguf +0 -0
- package/src/llama.cpp/models/ggml-vocab-deepseek-coder.gguf.inp +0 -112
- package/src/llama.cpp/models/ggml-vocab-deepseek-coder.gguf.out +0 -46
- package/src/llama.cpp/models/ggml-vocab-deepseek-llm.gguf +0 -0
- package/src/llama.cpp/models/ggml-vocab-deepseek-llm.gguf.inp +0 -112
- package/src/llama.cpp/models/ggml-vocab-deepseek-llm.gguf.out +0 -46
- package/src/llama.cpp/models/ggml-vocab-deepseek-r1-qwen.gguf.inp +0 -112
- package/src/llama.cpp/models/ggml-vocab-deepseek-r1-qwen.gguf.out +0 -46
- package/src/llama.cpp/models/ggml-vocab-falcon.gguf +0 -0
- package/src/llama.cpp/models/ggml-vocab-falcon.gguf.inp +0 -112
- package/src/llama.cpp/models/ggml-vocab-falcon.gguf.out +0 -46
- package/src/llama.cpp/models/ggml-vocab-gpt-2.gguf +0 -0
- package/src/llama.cpp/models/ggml-vocab-gpt-2.gguf.inp +0 -112
- package/src/llama.cpp/models/ggml-vocab-gpt-2.gguf.out +0 -46
- package/src/llama.cpp/models/ggml-vocab-gpt-4o.gguf.inp +0 -112
- package/src/llama.cpp/models/ggml-vocab-gpt-4o.gguf.out +0 -46
- package/src/llama.cpp/models/ggml-vocab-gpt-neox.gguf +0 -0
- package/src/llama.cpp/models/ggml-vocab-llama-bpe.gguf +0 -0
- package/src/llama.cpp/models/ggml-vocab-llama-bpe.gguf.inp +0 -112
- package/src/llama.cpp/models/ggml-vocab-llama-bpe.gguf.out +0 -46
- package/src/llama.cpp/models/ggml-vocab-llama-spm.gguf +0 -0
- package/src/llama.cpp/models/ggml-vocab-llama-spm.gguf.inp +0 -112
- package/src/llama.cpp/models/ggml-vocab-llama-spm.gguf.out +0 -46
- package/src/llama.cpp/models/ggml-vocab-llama4.gguf.inp +0 -112
- package/src/llama.cpp/models/ggml-vocab-llama4.gguf.out +0 -46
- package/src/llama.cpp/models/ggml-vocab-mpt.gguf +0 -0
- package/src/llama.cpp/models/ggml-vocab-mpt.gguf.inp +0 -112
- package/src/llama.cpp/models/ggml-vocab-mpt.gguf.out +0 -46
- package/src/llama.cpp/models/ggml-vocab-phi-3.gguf +0 -0
- package/src/llama.cpp/models/ggml-vocab-phi-3.gguf.inp +0 -112
- package/src/llama.cpp/models/ggml-vocab-phi-3.gguf.out +0 -46
- package/src/llama.cpp/models/ggml-vocab-pixtral.gguf.inp +0 -112
- package/src/llama.cpp/models/ggml-vocab-pixtral.gguf.out +0 -46
- package/src/llama.cpp/models/ggml-vocab-qwen2.gguf +0 -0
- package/src/llama.cpp/models/ggml-vocab-qwen2.gguf.inp +0 -112
- package/src/llama.cpp/models/ggml-vocab-qwen2.gguf.out +0 -46
- package/src/llama.cpp/models/ggml-vocab-refact.gguf +0 -0
- package/src/llama.cpp/models/ggml-vocab-refact.gguf.inp +0 -112
- package/src/llama.cpp/models/ggml-vocab-refact.gguf.out +0 -46
- package/src/llama.cpp/models/ggml-vocab-roberta-bpe.gguf.inp +0 -112
- package/src/llama.cpp/models/ggml-vocab-roberta-bpe.gguf.out +0 -46
- package/src/llama.cpp/models/ggml-vocab-starcoder.gguf +0 -0
- package/src/llama.cpp/models/ggml-vocab-starcoder.gguf.inp +0 -112
- package/src/llama.cpp/models/ggml-vocab-starcoder.gguf.out +0 -46
- package/src/llama.cpp/pocs/CMakeLists.txt +0 -14
- package/src/llama.cpp/pocs/vdot/CMakeLists.txt +0 -9
- package/src/llama.cpp/pocs/vdot/q8dot.cpp +0 -173
- package/src/llama.cpp/pocs/vdot/vdot.cpp +0 -311
- package/src/llama.cpp/prompts/LLM-questions.txt +0 -49
- package/src/llama.cpp/prompts/alpaca.txt +0 -1
- package/src/llama.cpp/prompts/assistant.txt +0 -31
- package/src/llama.cpp/prompts/chat-with-baichuan.txt +0 -4
- package/src/llama.cpp/prompts/chat-with-bob.txt +0 -7
- package/src/llama.cpp/prompts/chat-with-qwen.txt +0 -1
- package/src/llama.cpp/prompts/chat-with-vicuna-v0.txt +0 -7
- package/src/llama.cpp/prompts/chat-with-vicuna-v1.txt +0 -7
- package/src/llama.cpp/prompts/chat.txt +0 -28
- package/src/llama.cpp/prompts/dan-modified.txt +0 -1
- package/src/llama.cpp/prompts/dan.txt +0 -1
- package/src/llama.cpp/prompts/mnemonics.txt +0 -93
- package/src/llama.cpp/prompts/parallel-questions.txt +0 -43
- package/src/llama.cpp/prompts/reason-act.txt +0 -18
- package/src/llama.cpp/requirements/requirements-all.txt +0 -15
- package/src/llama.cpp/requirements/requirements-compare-llama-bench.txt +0 -2
- package/src/llama.cpp/requirements/requirements-convert_hf_to_gguf.txt +0 -7
- package/src/llama.cpp/requirements/requirements-convert_hf_to_gguf_update.txt +0 -7
- package/src/llama.cpp/requirements/requirements-convert_legacy_llama.txt +0 -5
- package/src/llama.cpp/requirements/requirements-convert_llama_ggml_to_gguf.txt +0 -1
- package/src/llama.cpp/requirements/requirements-convert_lora_to_gguf.txt +0 -4
- package/src/llama.cpp/requirements/requirements-gguf_editor_gui.txt +0 -3
- package/src/llama.cpp/requirements/requirements-pydantic.txt +0 -3
- package/src/llama.cpp/requirements/requirements-test-tokenizer-random.txt +0 -1
- package/src/llama.cpp/requirements/requirements-tool_bench.txt +0 -12
- package/src/llama.cpp/requirements.txt +0 -13
- package/src/llama.cpp/scripts/build-info.sh +0 -30
- package/src/llama.cpp/scripts/install-oneapi.bat +0 -19
- package/src/llama.cpp/scripts/xxd.cmake +0 -16
- package/src/llama.cpp/tests/CMakeLists.txt +0 -177
- package/src/llama.cpp/tests/get-model.cpp +0 -21
- package/src/llama.cpp/tests/get-model.h +0 -2
- package/src/llama.cpp/tests/test-arg-parser.cpp +0 -178
- package/src/llama.cpp/tests/test-autorelease.cpp +0 -24
- package/src/llama.cpp/tests/test-backend-ops.cpp +0 -4793
- package/src/llama.cpp/tests/test-barrier.cpp +0 -94
- package/src/llama.cpp/tests/test-c.c +0 -7
- package/src/llama.cpp/tests/test-chat-template.cpp +0 -417
- package/src/llama.cpp/tests/test-chat.cpp +0 -985
- package/src/llama.cpp/tests/test-double-float.cpp +0 -57
- package/src/llama.cpp/tests/test-gbnf-validator.cpp +0 -109
- package/src/llama.cpp/tests/test-gguf.cpp +0 -1338
- package/src/llama.cpp/tests/test-grammar-integration.cpp +0 -1308
- package/src/llama.cpp/tests/test-grammar-llguidance.cpp +0 -1201
- package/src/llama.cpp/tests/test-grammar-parser.cpp +0 -519
- package/src/llama.cpp/tests/test-json-schema-to-grammar.cpp +0 -1304
- package/src/llama.cpp/tests/test-llama-grammar.cpp +0 -408
- package/src/llama.cpp/tests/test-log.cpp +0 -39
- package/src/llama.cpp/tests/test-model-load-cancel.cpp +0 -27
- package/src/llama.cpp/tests/test-mtmd-c-api.c +0 -63
- package/src/llama.cpp/tests/test-opt.cpp +0 -904
- package/src/llama.cpp/tests/test-quantize-fns.cpp +0 -186
- package/src/llama.cpp/tests/test-quantize-perf.cpp +0 -365
- package/src/llama.cpp/tests/test-quantize-stats.cpp +0 -424
- package/src/llama.cpp/tests/test-regex-partial.cpp +0 -288
- package/src/llama.cpp/tests/test-rope.cpp +0 -262
- package/src/llama.cpp/tests/test-sampling.cpp +0 -399
- package/src/llama.cpp/tests/test-tokenizer-0.cpp +0 -312
- package/src/llama.cpp/tests/test-tokenizer-1-bpe.cpp +0 -155
- package/src/llama.cpp/tests/test-tokenizer-1-spm.cpp +0 -125
- package/src/llama.cpp/tools/CMakeLists.txt +0 -39
- package/src/llama.cpp/tools/batched-bench/CMakeLists.txt +0 -5
- package/src/llama.cpp/tools/batched-bench/batched-bench.cpp +0 -204
- package/src/llama.cpp/tools/cvector-generator/CMakeLists.txt +0 -5
- package/src/llama.cpp/tools/cvector-generator/completions.txt +0 -582
- package/src/llama.cpp/tools/cvector-generator/cvector-generator.cpp +0 -508
- package/src/llama.cpp/tools/cvector-generator/mean.hpp +0 -48
- package/src/llama.cpp/tools/cvector-generator/negative.txt +0 -4
- package/src/llama.cpp/tools/cvector-generator/pca.hpp +0 -315
- package/src/llama.cpp/tools/cvector-generator/positive.txt +0 -4
- package/src/llama.cpp/tools/export-lora/CMakeLists.txt +0 -5
- package/src/llama.cpp/tools/export-lora/export-lora.cpp +0 -434
- package/src/llama.cpp/tools/gguf-split/CMakeLists.txt +0 -5
- package/src/llama.cpp/tools/gguf-split/gguf-split.cpp +0 -583
- package/src/llama.cpp/tools/imatrix/CMakeLists.txt +0 -5
- package/src/llama.cpp/tools/imatrix/imatrix.cpp +0 -667
- package/src/llama.cpp/tools/llama-bench/CMakeLists.txt +0 -5
- package/src/llama.cpp/tools/llama-bench/llama-bench.cpp +0 -2024
- package/src/llama.cpp/tools/main/CMakeLists.txt +0 -5
- package/src/llama.cpp/tools/main/main.cpp +0 -977
- package/src/llama.cpp/tools/mtmd/CMakeLists.txt +0 -58
- package/src/llama.cpp/tools/mtmd/clip-impl.h +0 -462
- package/src/llama.cpp/tools/mtmd/clip.cpp +0 -4024
- package/src/llama.cpp/tools/mtmd/clip.h +0 -101
- package/src/llama.cpp/tools/mtmd/deprecation-warning.cpp +0 -22
- package/src/llama.cpp/tools/mtmd/miniaudio.h +0 -93468
- package/src/llama.cpp/tools/mtmd/mtmd-audio.cpp +0 -855
- package/src/llama.cpp/tools/mtmd/mtmd-audio.h +0 -62
- package/src/llama.cpp/tools/mtmd/mtmd-cli.cpp +0 -377
- package/src/llama.cpp/tools/mtmd/mtmd-helper.cpp +0 -297
- package/src/llama.cpp/tools/mtmd/mtmd.cpp +0 -942
- package/src/llama.cpp/tools/mtmd/mtmd.h +0 -362
- package/src/llama.cpp/tools/mtmd/requirements.txt +0 -5
- package/src/llama.cpp/tools/perplexity/CMakeLists.txt +0 -5
- package/src/llama.cpp/tools/perplexity/perplexity.cpp +0 -2063
- package/src/llama.cpp/tools/quantize/CMakeLists.txt +0 -6
- package/src/llama.cpp/tools/quantize/quantize.cpp +0 -519
- package/src/llama.cpp/tools/rpc/CMakeLists.txt +0 -4
- package/src/llama.cpp/tools/rpc/rpc-server.cpp +0 -322
- package/src/llama.cpp/tools/run/CMakeLists.txt +0 -16
- package/src/llama.cpp/tools/run/linenoise.cpp/linenoise.cpp +0 -1995
- package/src/llama.cpp/tools/run/linenoise.cpp/linenoise.h +0 -137
- package/src/llama.cpp/tools/run/run.cpp +0 -1261
- package/src/llama.cpp/tools/server/CMakeLists.txt +0 -51
- package/src/llama.cpp/tools/server/bench/requirements.txt +0 -2
- package/src/llama.cpp/tools/server/httplib.h +0 -10506
- package/src/llama.cpp/tools/server/server.cpp +0 -4966
- package/src/llama.cpp/tools/server/tests/requirements.txt +0 -8
- package/src/llama.cpp/tools/server/utils.hpp +0 -1337
- package/src/llama.cpp/tools/tokenize/CMakeLists.txt +0 -5
- package/src/llama.cpp/tools/tokenize/tokenize.cpp +0 -416
- package/src/llama.cpp/tools/tts/CMakeLists.txt +0 -5
- package/src/llama.cpp/tools/tts/tts.cpp +0 -1092
--- package/src/llama.cpp/tools/mtmd/mtmd-audio.h
+++ /dev/null
@@ -1,62 +0,0 @@
-#pragma once
-
-#include "ggml.h"
-
-#include <cstdint>
-#include <vector>
-#include <string>
-
-#define WHISPER_ASSERT GGML_ASSERT
-
-#define WHISPER_SAMPLE_RATE 16000
-#define WHISPER_N_FFT       400
-#define WHISPER_HOP_LENGTH  160
-#define WHISPER_CHUNK_SIZE  30
-
-#define COMMON_SAMPLE_RATE 16000
-
-namespace whisper_preprocessor {
-
-struct whisper_mel {
-    int n_len;
-    int n_len_org;
-    int n_mel;
-
-    std::vector<float> data;
-};
-
-struct whisper_filters {
-    int32_t n_mel;
-    int32_t n_fft;
-
-    std::vector<float> data;
-};
-
-extern bool preprocess_audio(
-        const float * samples,
-        size_t n_samples,
-        const whisper_filters & filters,
-        std::vector<whisper_mel> & output);
-
-} // namespace whisper_preprocessor
-
-
-// TODO @ngxson : move this helper to mtmd-helpers.cpp
-namespace audio_helpers {
-
-extern bool is_audio_file(const char * buf, size_t len);
-
-extern bool decode_audio_from_buf(
-        const unsigned char * buf_in,
-        size_t len,
-        int target_sampler_rate,
-        std::vector<float> & pcmf32_mono);
-
-} // namespace audio_helpers
-
-
-namespace whisper_precalc_filters {
-
-extern whisper_preprocessor::whisper_filters get_128_bins();
-
-} // namespace whisper_precalc_filters
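
For context, the deleted header above declared the Whisper-style mel-spectrogram API used by the mtmd tools. A minimal caller sketch (hypothetical driver code, not part of the package; it assumes the header and its implementation from tools/mtmd are still available on the include path) could look like:

#include "mtmd-audio.h"

#include <cstdio>
#include <vector>

int main() {
    // one second of silence at the 16 kHz rate the header hard-codes
    std::vector<float> samples(WHISPER_SAMPLE_RATE, 0.0f);

    // precomputed 128-bin mel filter bank exposed by the header
    const whisper_preprocessor::whisper_filters filters =
        whisper_precalc_filters::get_128_bins();

    std::vector<whisper_preprocessor::whisper_mel> mels;
    if (!whisper_preprocessor::preprocess_audio(
            samples.data(), samples.size(), filters, mels)) {
        std::fprintf(stderr, "preprocessing failed\n");
        return 1;
    }
    std::printf("got %zu mel chunk(s), n_mel = %d\n",
                mels.size(), mels.empty() ? 0 : mels[0].n_mel);
    return 0;
}
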
--- package/src/llama.cpp/tools/mtmd/mtmd-cli.cpp
+++ /dev/null
@@ -1,377 +0,0 @@
-#include "arg.h"
-#include "log.h"
-#include "common.h"
-#include "sampling.h"
-#include "llama.h"
-#include "ggml.h"
-#include "console.h"
-#include "chat.h"
-#include "mtmd.h"
-
-#include <vector>
-#include <limits.h>
-#include <cinttypes>
-
-#if defined (__unix__) || (defined (__APPLE__) && defined (__MACH__))
-#include <signal.h>
-#include <unistd.h>
-#elif defined (_WIN32)
-#define WIN32_LEAN_AND_MEAN
-#ifndef NOMINMAX
-#define NOMINMAX
-#endif
-#include <windows.h>
-#include <signal.h>
-#endif
-
-// volatile, because of signal being an interrupt
-static volatile bool g_is_generating = false;
-static volatile bool g_is_interrupted = false;
-
-/**
- * Please note that this is NOT a production-ready stuff.
- * It is a playground for trying multimodal support in llama.cpp.
- * For contributors: please keep this code simple and easy to understand.
- */
-
-static void show_additional_info(int /*argc*/, char ** argv) {
-    LOG(
-        "Experimental CLI for multimodal\n\n"
-        "Usage: %s [options] -m <model> --mmproj <mmproj> --image <image> --audio <audio> -p <prompt>\n\n"
-        "  -m and --mmproj are required\n"
-        "  -hf user/repo can replace both -m and --mmproj in most cases\n"
-        "  --image, --audio and -p are optional, if NOT provided, the CLI will run in chat mode\n"
-        "  to disable using GPU for mmproj model, add --no-mmproj-offload\n",
-        argv[0]
-    );
-}
-
-#if defined (__unix__) || (defined (__APPLE__) && defined (__MACH__)) || defined (_WIN32)
-static void sigint_handler(int signo) {
-    if (signo == SIGINT) {
-        if (g_is_generating) {
-            g_is_generating = false;
-        } else {
-            console::cleanup();
-            if (g_is_interrupted) {
-                _exit(1);
-            }
-            g_is_interrupted = true;
-        }
-    }
-}
-#endif
-
-struct mtmd_cli_context {
-    mtmd::context_ptr ctx_vision;
-    common_init_result llama_init;
-
-    llama_model       * model;
-    llama_context     * lctx;
-    const llama_vocab * vocab;
-    llama_batch         batch;
-    int                 n_batch;
-
-    mtmd::bitmaps bitmaps;
-
-    // note: we know that gemma3 template is "linear", meaning each turn is completely separated to another
-    // so here we don't need to keep track of chat history
-    common_chat_templates_ptr tmpls;
-
-    // support for legacy templates (models not having EOT token)
-    llama_tokens antiprompt_tokens;
-
-    int n_threads    = 1;
-    llama_pos n_past = 0;
-
-    mtmd_cli_context(common_params & params) : llama_init(common_init_from_params(params)) {
-        model = llama_init.model.get();
-        lctx = llama_init.context.get();
-        vocab = llama_model_get_vocab(model);
-        n_threads = params.cpuparams.n_threads;
-        batch = llama_batch_init(params.n_batch, 0, 1);
-        n_batch = params.n_batch;
-
-        if (!model || !lctx) {
-            exit(1);
-        }
-
-        if (!llama_model_chat_template(model, nullptr) && params.chat_template.empty()) {
-            LOG_ERR("Model does not have chat template.\n");
-            LOG_ERR("  For old llava models, you may need to use '--chat-template vicuna'\n");
-            LOG_ERR("  For MobileVLM models, use '--chat-template deepseek'\n");
-            LOG_ERR("  For Mistral Small 3.1, use '--chat-template mistral-v7'\n");
-            exit(1);
-        }
-
-        tmpls = common_chat_templates_init(model, params.chat_template);
-        LOG_INF("%s: chat template example:\n%s\n", __func__, common_chat_format_example(tmpls.get(), params.use_jinja).c_str());
-
-        init_vision_context(params);
-
-        // load antiprompt tokens for legacy templates
-        if (params.chat_template == "vicuna") {
-            antiprompt_tokens = common_tokenize(lctx, "ASSISTANT:", false, true);
-        } else if (params.chat_template == "deepseek") {
-            antiprompt_tokens = common_tokenize(lctx, "###", false, true);
-        }
-    }
-
-    void init_vision_context(common_params & params) {
-        const char * clip_path = params.mmproj.path.c_str();
-        mtmd_context_params mparams = mtmd_context_params_default();
-        mparams.use_gpu = params.mmproj_use_gpu;
-        mparams.print_timings = true;
-        mparams.n_threads = params.cpuparams.n_threads;
-        mparams.verbosity = params.verbosity > 0 ? GGML_LOG_LEVEL_DEBUG : GGML_LOG_LEVEL_INFO;
-        ctx_vision.reset(mtmd_init_from_file(clip_path, model, mparams));
-        if (!ctx_vision.get()) {
-            LOG_ERR("Failed to load vision model from %s\n", clip_path);
-            exit(1);
-        }
-    }
-
-    bool check_antiprompt(const llama_tokens & generated_tokens) {
-        if (antiprompt_tokens.empty() || generated_tokens.size() < antiprompt_tokens.size()) {
-            return false;
-        }
-        return std::equal(
-            generated_tokens.end() - antiprompt_tokens.size(),
-            generated_tokens.end(),
-            antiprompt_tokens.begin()
-        );
-    }
-
-    bool load_media(const std::string & fname) {
-        mtmd::bitmap bmp(mtmd_helper_bitmap_init_from_file(fname.c_str()));
-        if (!bmp.ptr) {
-            return false;
-        }
-        bitmaps.entries.push_back(std::move(bmp));
-        return true;
-    }
-};
-
-static int generate_response(mtmd_cli_context & ctx, common_sampler * smpl, int n_predict) {
-    llama_tokens generated_tokens;
-    for (int i = 0; i < n_predict; i++) {
-        if (i > n_predict || !g_is_generating || g_is_interrupted) {
-            LOG("\n");
-            break;
-        }
-
-        llama_token token_id = common_sampler_sample(smpl, ctx.lctx, -1);
-        generated_tokens.push_back(token_id);
-        common_sampler_accept(smpl, token_id, true);
-
-        if (llama_vocab_is_eog(ctx.vocab, token_id) || ctx.check_antiprompt(generated_tokens)) {
-            LOG("\n");
-            break; // end of generation
-        }
-
-        LOG("%s", common_token_to_piece(ctx.lctx, token_id).c_str());
-        fflush(stdout);
-
-        if (g_is_interrupted) {
-            LOG("\n");
-            break;
-        }
-
-        // eval the token
-        common_batch_clear(ctx.batch);
-        common_batch_add(ctx.batch, token_id, ctx.n_past++, {0}, true);
-        if (llama_decode(ctx.lctx, ctx.batch)) {
-            LOG_ERR("failed to decode token\n");
-            return 1;
-        }
-    }
-    return 0;
-}
-
-static int eval_message(mtmd_cli_context & ctx, common_chat_msg & msg, bool add_bos = false) {
-    common_chat_templates_inputs tmpl_inputs;
-    tmpl_inputs.messages = {msg};
-    tmpl_inputs.add_generation_prompt = true;
-    tmpl_inputs.use_jinja = false; // jinja is buggy here
-    auto formatted_chat = common_chat_templates_apply(ctx.tmpls.get(), tmpl_inputs);
-    LOG_DBG("formatted_chat.prompt: %s\n", formatted_chat.prompt.c_str());
-
-    mtmd_input_text text;
-    text.text          = formatted_chat.prompt.c_str();
-    text.add_special   = add_bos;
-    text.parse_special = true;
-
-    if (g_is_interrupted) return 0;
-
-    mtmd::input_chunks chunks(mtmd_input_chunks_init());
-    auto bitmaps_c_ptr = ctx.bitmaps.c_ptr();
-    int32_t res = mtmd_tokenize(ctx.ctx_vision.get(),
-                        chunks.ptr.get(), // output
-                        &text, // text
-                        bitmaps_c_ptr.data(),
-                        bitmaps_c_ptr.size());
-    if (res != 0) {
-        LOG_ERR("Unable to tokenize prompt, res = %d\n", res);
-        return 1;
-    }
-
-    ctx.bitmaps.entries.clear();
-
-    llama_pos new_n_past;
-    if (mtmd_helper_eval_chunks(ctx.ctx_vision.get(),
-                ctx.lctx, // lctx
-                chunks.ptr.get(), // chunks
-                ctx.n_past, // n_past
-                0, // seq_id
-                ctx.n_batch, // n_batch
-                true, // logits_last
-                &new_n_past)) {
-        LOG_ERR("Unable to eval prompt\n");
-        return 1;
-    }
-
-    ctx.n_past = new_n_past;
-
-    LOG("\n");
-
-    return 0;
-}
-
-int main(int argc, char ** argv) {
-    ggml_time_init();
-
-    common_params params;
-    params.sampling.temp = 0.2; // lower temp by default for better quality
-
-    if (!common_params_parse(argc, argv, params, LLAMA_EXAMPLE_MTMD, show_additional_info)) {
-        return 1;
-    }
-
-    common_init();
-
-    if (params.mmproj.path.empty()) {
-        show_additional_info(argc, argv);
-        LOG_ERR("ERR: Missing --mmproj argument\n");
-        return 1;
-    }
-
-    mtmd_cli_context ctx(params);
-    LOG("%s: loading model: %s\n", __func__, params.model.path.c_str());
-
-    bool is_single_turn = !params.prompt.empty() && !params.image.empty();
-
-    struct common_sampler * smpl = common_sampler_init(ctx.model, params.sampling);
-    int n_predict = params.n_predict < 0 ? INT_MAX : params.n_predict;
-
-    // Ctrl+C handling
-    {
-#if defined (__unix__) || (defined (__APPLE__) && defined (__MACH__))
-        struct sigaction sigint_action;
-        sigint_action.sa_handler = sigint_handler;
-        sigemptyset (&sigint_action.sa_mask);
-        sigint_action.sa_flags = 0;
-        sigaction(SIGINT, &sigint_action, NULL);
-#elif defined (_WIN32)
-        auto console_ctrl_handler = +[](DWORD ctrl_type) -> BOOL {
-            return (ctrl_type == CTRL_C_EVENT) ? (sigint_handler(SIGINT), true) : false;
-        };
-        SetConsoleCtrlHandler(reinterpret_cast<PHANDLER_ROUTINE>(console_ctrl_handler), true);
-#endif
-    }
-
-    if (g_is_interrupted) return 130;
-
-    if (is_single_turn) {
-        g_is_generating = true;
-        if (params.prompt.find(mtmd_default_marker()) == std::string::npos) {
-            params.prompt += mtmd_default_marker();
-        }
-        common_chat_msg msg;
-        msg.role    = "user";
-        msg.content = params.prompt;
-        for (const auto & image : params.image) {
-            if (!ctx.load_media(image)) {
-                return 1; // error is already printed by libmtmd
-            }
-        }
-        if (eval_message(ctx, msg, true)) {
-            return 1;
-        }
-        if (!g_is_interrupted && generate_response(ctx, smpl, n_predict)) {
-            return 1;
-        }
-
-    } else {
-        LOG("\n Running in chat mode, available commands:");
-        if (mtmd_support_vision(ctx.ctx_vision.get())) {
-            LOG("\n   /image <path>    load an image");
-        }
-        if (mtmd_support_audio(ctx.ctx_vision.get())) {
-            LOG("\n   /audio <path>    load an audio");
-        }
-        LOG("\n   /clear           clear the chat history");
-        LOG("\n   /quit or /exit   exit the program");
-        LOG("\n");
-
-        bool is_first_msg = true;
-        std::string content;
-
-        while (!g_is_interrupted) {
-            g_is_generating = false;
-            LOG("\n> ");
-            console::set_display(console::user_input);
-            std::string line;
-            console::readline(line, false);
-            if (g_is_interrupted) break;
-            console::set_display(console::reset);
-            line = string_strip(line);
-            if (line.empty()) {
-                continue;
-            }
-            if (line == "/quit" || line == "/exit") {
-                break;
-            }
-            if (line == "/clear") {
-                ctx.n_past = 0;
-                llama_kv_self_seq_rm(ctx.lctx, 0, 1, -1); // keep BOS
-                LOG("Chat history cleared\n\n");
-                continue;
-            }
-            g_is_generating = true;
-            bool is_image = line == "/image" || line.find("/image ") == 0;
-            bool is_audio = line == "/audio" || line.find("/audio ") == 0;
-            if (is_image || is_audio) {
-                if (line.size() < 8) {
-                    LOG_ERR("ERR: Missing media filename\n");
-                    continue;
-                }
-                std::string media_path = line.substr(7);
-                if (ctx.load_media(media_path)) {
-                    LOG("%s %s loaded\n", media_path.c_str(), is_image ? "image" : "audio");
-                    content += mtmd_default_marker();
-                }
-                // else, error is already printed by libmtmd
-                continue;
-            } else {
-                content += line;
-            }
-            common_chat_msg msg;
-            msg.role    = "user";
-            msg.content = content;
-            int ret = eval_message(ctx, msg, is_first_msg);
-            if (ret) {
-                return 1;
-            }
-            if (g_is_interrupted) break;
-            if (generate_response(ctx, smpl, n_predict)) {
-                return 1;
-            }
-            content.clear();
-            is_first_msg = false;
-        }
-    }
-    if (g_is_interrupted) LOG("\nInterrupted by user\n");
-    LOG("\n\n");
-    llama_perf_context_print(ctx.lctx);
-    return g_is_interrupted ? 130 : 0;
-}
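
For reference, the deleted CLI documents its own invocation in show_additional_info() above: <binary> [options] -m <model> --mmproj <mmproj> --image <image> --audio <audio> -p <prompt>, where -m and --mmproj are required; when --image, --audio, and -p are omitted it falls back to the interactive chat mode implemented in the loop above (the installed binary name is not stated in this diff; in upstream llama.cpp builds it is typically llama-mtmd-cli).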