@fugood/llama.node 0.6.3 → 1.0.0-beta.1
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/CMakeLists.txt +40 -30
- package/README.md +4 -1
- package/lib/binding.js +41 -29
- package/lib/binding.ts +26 -25
- package/package.json +45 -7
- package/scripts/build.js +47 -0
- package/scripts/llama.cpp.patch +109 -0
- package/src/anyascii.c +22223 -0
- package/src/anyascii.h +42 -0
- package/src/tts_utils.cpp +20 -7
- package/src/tts_utils.h +2 -0
- package/bin/darwin/arm64/llama-node.node +0 -0
- package/bin/darwin/x64/llama-node.node +0 -0
- package/bin/linux/arm64/llama-node.node +0 -0
- package/bin/linux/x64/llama-node.node +0 -0
- package/bin/linux-cuda/arm64/llama-node.node +0 -0
- package/bin/linux-cuda/x64/llama-node.node +0 -0
- package/bin/linux-vulkan/arm64/llama-node.node +0 -0
- package/bin/linux-vulkan/x64/llama-node.node +0 -0
- package/bin/win32/x64/llama-node.node +0 -0
- package/bin/win32/x64/node.lib +0 -0
- package/bin/win32-vulkan/arm64/llama-node.node +0 -0
- package/bin/win32-vulkan/arm64/node.lib +0 -0
- package/bin/win32-vulkan/x64/llama-node.node +0 -0
- package/bin/win32-vulkan/x64/node.lib +0 -0
- package/src/llama.cpp/.github/workflows/build-linux-cross.yml +0 -233
- package/src/llama.cpp/.github/workflows/build.yml +0 -1078
- package/src/llama.cpp/.github/workflows/close-issue.yml +0 -28
- package/src/llama.cpp/.github/workflows/docker.yml +0 -178
- package/src/llama.cpp/.github/workflows/editorconfig.yml +0 -29
- package/src/llama.cpp/.github/workflows/gguf-publish.yml +0 -44
- package/src/llama.cpp/.github/workflows/labeler.yml +0 -17
- package/src/llama.cpp/.github/workflows/python-check-requirements.yml +0 -33
- package/src/llama.cpp/.github/workflows/python-lint.yml +0 -30
- package/src/llama.cpp/.github/workflows/python-type-check.yml +0 -40
- package/src/llama.cpp/.github/workflows/release.yml +0 -739
- package/src/llama.cpp/.github/workflows/server.yml +0 -237
- package/src/llama.cpp/.github/workflows/winget.yml +0 -42
- package/src/llama.cpp/cmake/arm64-apple-clang.cmake +0 -16
- package/src/llama.cpp/cmake/arm64-windows-llvm.cmake +0 -16
- package/src/llama.cpp/cmake/build-info.cmake +0 -64
- package/src/llama.cpp/cmake/common.cmake +0 -35
- package/src/llama.cpp/cmake/git-vars.cmake +0 -22
- package/src/llama.cpp/cmake/x64-windows-llvm.cmake +0 -5
- package/src/llama.cpp/common/build-info.cpp.in +0 -4
- package/src/llama.cpp/docs/build.md +0 -561
- package/src/llama.cpp/examples/CMakeLists.txt +0 -43
- package/src/llama.cpp/examples/batched/CMakeLists.txt +0 -5
- package/src/llama.cpp/examples/batched/batched.cpp +0 -246
- package/src/llama.cpp/examples/chat-13B.bat +0 -57
- package/src/llama.cpp/examples/convert-llama2c-to-ggml/CMakeLists.txt +0 -5
- package/src/llama.cpp/examples/convert-llama2c-to-ggml/convert-llama2c-to-ggml.cpp +0 -941
- package/src/llama.cpp/examples/deprecation-warning/deprecation-warning.cpp +0 -35
- package/src/llama.cpp/examples/embedding/CMakeLists.txt +0 -5
- package/src/llama.cpp/examples/embedding/embedding.cpp +0 -323
- package/src/llama.cpp/examples/eval-callback/CMakeLists.txt +0 -10
- package/src/llama.cpp/examples/eval-callback/eval-callback.cpp +0 -194
- package/src/llama.cpp/examples/gen-docs/CMakeLists.txt +0 -5
- package/src/llama.cpp/examples/gen-docs/gen-docs.cpp +0 -83
- package/src/llama.cpp/examples/gguf/CMakeLists.txt +0 -5
- package/src/llama.cpp/examples/gguf/gguf.cpp +0 -265
- package/src/llama.cpp/examples/gguf-hash/CMakeLists.txt +0 -22
- package/src/llama.cpp/examples/gguf-hash/deps/rotate-bits/rotate-bits.h +0 -46
- package/src/llama.cpp/examples/gguf-hash/deps/sha1/sha1.c +0 -295
- package/src/llama.cpp/examples/gguf-hash/deps/sha1/sha1.h +0 -52
- package/src/llama.cpp/examples/gguf-hash/deps/sha256/sha256.c +0 -221
- package/src/llama.cpp/examples/gguf-hash/deps/sha256/sha256.h +0 -24
- package/src/llama.cpp/examples/gguf-hash/deps/xxhash/xxhash.c +0 -42
- package/src/llama.cpp/examples/gguf-hash/deps/xxhash/xxhash.h +0 -7093
- package/src/llama.cpp/examples/gguf-hash/gguf-hash.cpp +0 -694
- package/src/llama.cpp/examples/gritlm/CMakeLists.txt +0 -5
- package/src/llama.cpp/examples/gritlm/gritlm.cpp +0 -229
- package/src/llama.cpp/examples/jeopardy/questions.txt +0 -100
- package/src/llama.cpp/examples/llama.android/app/build.gradle.kts +0 -65
- package/src/llama.cpp/examples/llama.android/build.gradle.kts +0 -6
- package/src/llama.cpp/examples/llama.android/llama/build.gradle.kts +0 -71
- package/src/llama.cpp/examples/llama.android/llama/src/main/cpp/CMakeLists.txt +0 -53
- package/src/llama.cpp/examples/llama.android/llama/src/main/cpp/llama-android.cpp +0 -452
- package/src/llama.cpp/examples/llama.android/settings.gradle.kts +0 -18
- package/src/llama.cpp/examples/lookahead/CMakeLists.txt +0 -5
- package/src/llama.cpp/examples/lookahead/lookahead.cpp +0 -472
- package/src/llama.cpp/examples/lookup/CMakeLists.txt +0 -23
- package/src/llama.cpp/examples/lookup/lookup-create.cpp +0 -40
- package/src/llama.cpp/examples/lookup/lookup-merge.cpp +0 -47
- package/src/llama.cpp/examples/lookup/lookup-stats.cpp +0 -157
- package/src/llama.cpp/examples/lookup/lookup.cpp +0 -242
- package/src/llama.cpp/examples/parallel/CMakeLists.txt +0 -5
- package/src/llama.cpp/examples/parallel/parallel.cpp +0 -492
- package/src/llama.cpp/examples/passkey/CMakeLists.txt +0 -5
- package/src/llama.cpp/examples/passkey/passkey.cpp +0 -277
- package/src/llama.cpp/examples/retrieval/CMakeLists.txt +0 -5
- package/src/llama.cpp/examples/retrieval/retrieval.cpp +0 -304
- package/src/llama.cpp/examples/save-load-state/CMakeLists.txt +0 -5
- package/src/llama.cpp/examples/save-load-state/save-load-state.cpp +0 -246
- package/src/llama.cpp/examples/simple/CMakeLists.txt +0 -5
- package/src/llama.cpp/examples/simple/simple.cpp +0 -206
- package/src/llama.cpp/examples/simple-chat/CMakeLists.txt +0 -5
- package/src/llama.cpp/examples/simple-chat/simple-chat.cpp +0 -206
- package/src/llama.cpp/examples/simple-cmake-pkg/CMakeLists.txt +0 -11
- package/src/llama.cpp/examples/speculative/CMakeLists.txt +0 -5
- package/src/llama.cpp/examples/speculative/speculative.cpp +0 -644
- package/src/llama.cpp/examples/speculative-simple/CMakeLists.txt +0 -5
- package/src/llama.cpp/examples/speculative-simple/speculative-simple.cpp +0 -261
- package/src/llama.cpp/examples/sycl/CMakeLists.txt +0 -9
- package/src/llama.cpp/examples/sycl/build.sh +0 -23
- package/src/llama.cpp/examples/sycl/ls-sycl-device.cpp +0 -13
- package/src/llama.cpp/examples/sycl/run-llama2.sh +0 -27
- package/src/llama.cpp/examples/sycl/run-llama3.sh +0 -28
- package/src/llama.cpp/examples/sycl/win-build-sycl.bat +0 -33
- package/src/llama.cpp/examples/sycl/win-run-llama2.bat +0 -9
- package/src/llama.cpp/examples/sycl/win-run-llama3.bat +0 -9
- package/src/llama.cpp/examples/training/CMakeLists.txt +0 -5
- package/src/llama.cpp/examples/training/finetune.cpp +0 -96
- package/src/llama.cpp/ggml/cmake/GitVars.cmake +0 -22
- package/src/llama.cpp/ggml/cmake/common.cmake +0 -26
- package/src/llama.cpp/ggml/src/ggml-alloc.c +0 -1042
- package/src/llama.cpp/ggml/src/ggml-backend-impl.h +0 -255
- package/src/llama.cpp/ggml/src/ggml-backend-reg.cpp +0 -586
- package/src/llama.cpp/ggml/src/ggml-backend.cpp +0 -2008
- package/src/llama.cpp/ggml/src/ggml-blas/CMakeLists.txt +0 -87
- package/src/llama.cpp/ggml/src/ggml-blas/ggml-blas.cpp +0 -517
- package/src/llama.cpp/ggml/src/ggml-cann/CMakeLists.txt +0 -74
- package/src/llama.cpp/ggml/src/ggml-cann/acl_tensor.cpp +0 -179
- package/src/llama.cpp/ggml/src/ggml-cann/acl_tensor.h +0 -258
- package/src/llama.cpp/ggml/src/ggml-cann/aclnn_ops.cpp +0 -2863
- package/src/llama.cpp/ggml/src/ggml-cann/aclnn_ops.h +0 -1110
- package/src/llama.cpp/ggml/src/ggml-cann/common.h +0 -420
- package/src/llama.cpp/ggml/src/ggml-cann/ggml-cann.cpp +0 -2570
- package/src/llama.cpp/ggml/src/ggml-common.h +0 -1857
- package/src/llama.cpp/ggml/src/ggml-cpu/cmake/FindSIMD.cmake +0 -100
- package/src/llama.cpp/ggml/src/ggml-cuda/CMakeLists.txt +0 -184
- package/src/llama.cpp/ggml/src/ggml-cuda/vendors/cuda.h +0 -15
- package/src/llama.cpp/ggml/src/ggml-cuda/vendors/hip.h +0 -243
- package/src/llama.cpp/ggml/src/ggml-cuda/vendors/musa.h +0 -140
- package/src/llama.cpp/ggml/src/ggml-hip/CMakeLists.txt +0 -131
- package/src/llama.cpp/ggml/src/ggml-impl.h +0 -601
- package/src/llama.cpp/ggml/src/ggml-kompute/CMakeLists.txt +0 -166
- package/src/llama.cpp/ggml/src/ggml-kompute/ggml-kompute.cpp +0 -2251
- package/src/llama.cpp/ggml/src/ggml-metal/CMakeLists.txt +0 -120
- package/src/llama.cpp/ggml/src/ggml-metal/ggml-metal-impl.h +0 -622
- package/src/llama.cpp/ggml/src/ggml-musa/CMakeLists.txt +0 -113
- package/src/llama.cpp/ggml/src/ggml-opencl/CMakeLists.txt +0 -96
- package/src/llama.cpp/ggml/src/ggml-opencl/ggml-opencl.cpp +0 -5124
- package/src/llama.cpp/ggml/src/ggml-opt.cpp +0 -1037
- package/src/llama.cpp/ggml/src/ggml-quants.c +0 -5232
- package/src/llama.cpp/ggml/src/ggml-quants.h +0 -100
- package/src/llama.cpp/ggml/src/ggml-rpc/CMakeLists.txt +0 -9
- package/src/llama.cpp/ggml/src/ggml-rpc/ggml-rpc.cpp +0 -1813
- package/src/llama.cpp/ggml/src/ggml-sycl/CMakeLists.txt +0 -189
- package/src/llama.cpp/ggml/src/ggml-sycl/backend.hpp +0 -37
- package/src/llama.cpp/ggml/src/ggml-sycl/binbcast.cpp +0 -239
- package/src/llama.cpp/ggml/src/ggml-sycl/binbcast.hpp +0 -39
- package/src/llama.cpp/ggml/src/ggml-sycl/common.cpp +0 -83
- package/src/llama.cpp/ggml/src/ggml-sycl/common.hpp +0 -493
- package/src/llama.cpp/ggml/src/ggml-sycl/concat.cpp +0 -197
- package/src/llama.cpp/ggml/src/ggml-sycl/concat.hpp +0 -20
- package/src/llama.cpp/ggml/src/ggml-sycl/conv.cpp +0 -100
- package/src/llama.cpp/ggml/src/ggml-sycl/conv.hpp +0 -20
- package/src/llama.cpp/ggml/src/ggml-sycl/convert.cpp +0 -623
- package/src/llama.cpp/ggml/src/ggml-sycl/convert.hpp +0 -34
- package/src/llama.cpp/ggml/src/ggml-sycl/cpy.cpp +0 -701
- package/src/llama.cpp/ggml/src/ggml-sycl/cpy.hpp +0 -11
- package/src/llama.cpp/ggml/src/ggml-sycl/dequantize.hpp +0 -791
- package/src/llama.cpp/ggml/src/ggml-sycl/dmmv.cpp +0 -1160
- package/src/llama.cpp/ggml/src/ggml-sycl/dmmv.hpp +0 -27
- package/src/llama.cpp/ggml/src/ggml-sycl/dpct/helper.hpp +0 -2957
- package/src/llama.cpp/ggml/src/ggml-sycl/element_wise.cpp +0 -1536
- package/src/llama.cpp/ggml/src/ggml-sycl/element_wise.hpp +0 -75
- package/src/llama.cpp/ggml/src/ggml-sycl/gemm.hpp +0 -99
- package/src/llama.cpp/ggml/src/ggml-sycl/getrows.cpp +0 -311
- package/src/llama.cpp/ggml/src/ggml-sycl/getrows.hpp +0 -20
- package/src/llama.cpp/ggml/src/ggml-sycl/ggml-sycl.cpp +0 -4443
- package/src/llama.cpp/ggml/src/ggml-sycl/gla.cpp +0 -105
- package/src/llama.cpp/ggml/src/ggml-sycl/gla.hpp +0 -8
- package/src/llama.cpp/ggml/src/ggml-sycl/im2col.cpp +0 -136
- package/src/llama.cpp/ggml/src/ggml-sycl/im2col.hpp +0 -21
- package/src/llama.cpp/ggml/src/ggml-sycl/mmq.cpp +0 -3030
- package/src/llama.cpp/ggml/src/ggml-sycl/mmq.hpp +0 -33
- package/src/llama.cpp/ggml/src/ggml-sycl/mmvq.cpp +0 -1108
- package/src/llama.cpp/ggml/src/ggml-sycl/mmvq.hpp +0 -27
- package/src/llama.cpp/ggml/src/ggml-sycl/norm.cpp +0 -474
- package/src/llama.cpp/ggml/src/ggml-sycl/norm.hpp +0 -26
- package/src/llama.cpp/ggml/src/ggml-sycl/outprod.cpp +0 -46
- package/src/llama.cpp/ggml/src/ggml-sycl/outprod.hpp +0 -10
- package/src/llama.cpp/ggml/src/ggml-sycl/presets.hpp +0 -74
- package/src/llama.cpp/ggml/src/ggml-sycl/quants.hpp +0 -83
- package/src/llama.cpp/ggml/src/ggml-sycl/rope.cpp +0 -362
- package/src/llama.cpp/ggml/src/ggml-sycl/rope.hpp +0 -20
- package/src/llama.cpp/ggml/src/ggml-sycl/softmax.cpp +0 -264
- package/src/llama.cpp/ggml/src/ggml-sycl/softmax.hpp +0 -20
- package/src/llama.cpp/ggml/src/ggml-sycl/sycl_hw.cpp +0 -13
- package/src/llama.cpp/ggml/src/ggml-sycl/sycl_hw.hpp +0 -23
- package/src/llama.cpp/ggml/src/ggml-sycl/tsembd.cpp +0 -73
- package/src/llama.cpp/ggml/src/ggml-sycl/tsembd.hpp +0 -20
- package/src/llama.cpp/ggml/src/ggml-sycl/vecdotq.hpp +0 -1215
- package/src/llama.cpp/ggml/src/ggml-sycl/wkv.cpp +0 -305
- package/src/llama.cpp/ggml/src/ggml-sycl/wkv.hpp +0 -10
- package/src/llama.cpp/ggml/src/ggml-threading.cpp +0 -12
- package/src/llama.cpp/ggml/src/ggml-threading.h +0 -14
- package/src/llama.cpp/ggml/src/ggml-vulkan/CMakeLists.txt +0 -196
- package/src/llama.cpp/ggml/src/ggml-vulkan/ggml-vulkan.cpp +0 -10699
- package/src/llama.cpp/ggml/src/ggml-vulkan/vulkan-shaders/CMakeLists.txt +0 -39
- package/src/llama.cpp/ggml/src/ggml-vulkan/vulkan-shaders/vulkan-shaders-gen.cpp +0 -751
- package/src/llama.cpp/ggml/src/ggml.c +0 -6550
- package/src/llama.cpp/ggml/src/gguf.cpp +0 -1330
- package/src/llama.cpp/models/.editorconfig +0 -1
- package/src/llama.cpp/models/ggml-vocab-aquila.gguf +0 -0
- package/src/llama.cpp/models/ggml-vocab-baichuan.gguf +0 -0
- package/src/llama.cpp/models/ggml-vocab-bert-bge.gguf +0 -0
- package/src/llama.cpp/models/ggml-vocab-bert-bge.gguf.inp +0 -112
- package/src/llama.cpp/models/ggml-vocab-bert-bge.gguf.out +0 -46
- package/src/llama.cpp/models/ggml-vocab-chameleon.gguf.inp +0 -112
- package/src/llama.cpp/models/ggml-vocab-chameleon.gguf.out +0 -46
- package/src/llama.cpp/models/ggml-vocab-command-r.gguf +0 -0
- package/src/llama.cpp/models/ggml-vocab-command-r.gguf.inp +0 -112
- package/src/llama.cpp/models/ggml-vocab-command-r.gguf.out +0 -46
- package/src/llama.cpp/models/ggml-vocab-deepseek-coder.gguf +0 -0
- package/src/llama.cpp/models/ggml-vocab-deepseek-coder.gguf.inp +0 -112
- package/src/llama.cpp/models/ggml-vocab-deepseek-coder.gguf.out +0 -46
- package/src/llama.cpp/models/ggml-vocab-deepseek-llm.gguf +0 -0
- package/src/llama.cpp/models/ggml-vocab-deepseek-llm.gguf.inp +0 -112
- package/src/llama.cpp/models/ggml-vocab-deepseek-llm.gguf.out +0 -46
- package/src/llama.cpp/models/ggml-vocab-deepseek-r1-qwen.gguf.inp +0 -112
- package/src/llama.cpp/models/ggml-vocab-deepseek-r1-qwen.gguf.out +0 -46
- package/src/llama.cpp/models/ggml-vocab-falcon.gguf +0 -0
- package/src/llama.cpp/models/ggml-vocab-falcon.gguf.inp +0 -112
- package/src/llama.cpp/models/ggml-vocab-falcon.gguf.out +0 -46
- package/src/llama.cpp/models/ggml-vocab-gpt-2.gguf +0 -0
- package/src/llama.cpp/models/ggml-vocab-gpt-2.gguf.inp +0 -112
- package/src/llama.cpp/models/ggml-vocab-gpt-2.gguf.out +0 -46
- package/src/llama.cpp/models/ggml-vocab-gpt-4o.gguf.inp +0 -112
- package/src/llama.cpp/models/ggml-vocab-gpt-4o.gguf.out +0 -46
- package/src/llama.cpp/models/ggml-vocab-gpt-neox.gguf +0 -0
- package/src/llama.cpp/models/ggml-vocab-llama-bpe.gguf +0 -0
- package/src/llama.cpp/models/ggml-vocab-llama-bpe.gguf.inp +0 -112
- package/src/llama.cpp/models/ggml-vocab-llama-bpe.gguf.out +0 -46
- package/src/llama.cpp/models/ggml-vocab-llama-spm.gguf +0 -0
- package/src/llama.cpp/models/ggml-vocab-llama-spm.gguf.inp +0 -112
- package/src/llama.cpp/models/ggml-vocab-llama-spm.gguf.out +0 -46
- package/src/llama.cpp/models/ggml-vocab-llama4.gguf.inp +0 -112
- package/src/llama.cpp/models/ggml-vocab-llama4.gguf.out +0 -46
- package/src/llama.cpp/models/ggml-vocab-mpt.gguf +0 -0
- package/src/llama.cpp/models/ggml-vocab-mpt.gguf.inp +0 -112
- package/src/llama.cpp/models/ggml-vocab-mpt.gguf.out +0 -46
- package/src/llama.cpp/models/ggml-vocab-phi-3.gguf +0 -0
- package/src/llama.cpp/models/ggml-vocab-phi-3.gguf.inp +0 -112
- package/src/llama.cpp/models/ggml-vocab-phi-3.gguf.out +0 -46
- package/src/llama.cpp/models/ggml-vocab-pixtral.gguf.inp +0 -112
- package/src/llama.cpp/models/ggml-vocab-pixtral.gguf.out +0 -46
- package/src/llama.cpp/models/ggml-vocab-qwen2.gguf +0 -0
- package/src/llama.cpp/models/ggml-vocab-qwen2.gguf.inp +0 -112
- package/src/llama.cpp/models/ggml-vocab-qwen2.gguf.out +0 -46
- package/src/llama.cpp/models/ggml-vocab-refact.gguf +0 -0
- package/src/llama.cpp/models/ggml-vocab-refact.gguf.inp +0 -112
- package/src/llama.cpp/models/ggml-vocab-refact.gguf.out +0 -46
- package/src/llama.cpp/models/ggml-vocab-roberta-bpe.gguf.inp +0 -112
- package/src/llama.cpp/models/ggml-vocab-roberta-bpe.gguf.out +0 -46
- package/src/llama.cpp/models/ggml-vocab-starcoder.gguf +0 -0
- package/src/llama.cpp/models/ggml-vocab-starcoder.gguf.inp +0 -112
- package/src/llama.cpp/models/ggml-vocab-starcoder.gguf.out +0 -46
- package/src/llama.cpp/pocs/CMakeLists.txt +0 -14
- package/src/llama.cpp/pocs/vdot/CMakeLists.txt +0 -9
- package/src/llama.cpp/pocs/vdot/q8dot.cpp +0 -173
- package/src/llama.cpp/pocs/vdot/vdot.cpp +0 -311
- package/src/llama.cpp/prompts/LLM-questions.txt +0 -49
- package/src/llama.cpp/prompts/alpaca.txt +0 -1
- package/src/llama.cpp/prompts/assistant.txt +0 -31
- package/src/llama.cpp/prompts/chat-with-baichuan.txt +0 -4
- package/src/llama.cpp/prompts/chat-with-bob.txt +0 -7
- package/src/llama.cpp/prompts/chat-with-qwen.txt +0 -1
- package/src/llama.cpp/prompts/chat-with-vicuna-v0.txt +0 -7
- package/src/llama.cpp/prompts/chat-with-vicuna-v1.txt +0 -7
- package/src/llama.cpp/prompts/chat.txt +0 -28
- package/src/llama.cpp/prompts/dan-modified.txt +0 -1
- package/src/llama.cpp/prompts/dan.txt +0 -1
- package/src/llama.cpp/prompts/mnemonics.txt +0 -93
- package/src/llama.cpp/prompts/parallel-questions.txt +0 -43
- package/src/llama.cpp/prompts/reason-act.txt +0 -18
- package/src/llama.cpp/requirements/requirements-all.txt +0 -15
- package/src/llama.cpp/requirements/requirements-compare-llama-bench.txt +0 -2
- package/src/llama.cpp/requirements/requirements-convert_hf_to_gguf.txt +0 -7
- package/src/llama.cpp/requirements/requirements-convert_hf_to_gguf_update.txt +0 -7
- package/src/llama.cpp/requirements/requirements-convert_legacy_llama.txt +0 -5
- package/src/llama.cpp/requirements/requirements-convert_llama_ggml_to_gguf.txt +0 -1
- package/src/llama.cpp/requirements/requirements-convert_lora_to_gguf.txt +0 -4
- package/src/llama.cpp/requirements/requirements-gguf_editor_gui.txt +0 -3
- package/src/llama.cpp/requirements/requirements-pydantic.txt +0 -3
- package/src/llama.cpp/requirements/requirements-test-tokenizer-random.txt +0 -1
- package/src/llama.cpp/requirements/requirements-tool_bench.txt +0 -12
- package/src/llama.cpp/requirements.txt +0 -13
- package/src/llama.cpp/scripts/build-info.sh +0 -30
- package/src/llama.cpp/scripts/install-oneapi.bat +0 -19
- package/src/llama.cpp/scripts/xxd.cmake +0 -16
- package/src/llama.cpp/tests/CMakeLists.txt +0 -177
- package/src/llama.cpp/tests/get-model.cpp +0 -21
- package/src/llama.cpp/tests/get-model.h +0 -2
- package/src/llama.cpp/tests/test-arg-parser.cpp +0 -178
- package/src/llama.cpp/tests/test-autorelease.cpp +0 -24
- package/src/llama.cpp/tests/test-backend-ops.cpp +0 -4793
- package/src/llama.cpp/tests/test-barrier.cpp +0 -94
- package/src/llama.cpp/tests/test-c.c +0 -7
- package/src/llama.cpp/tests/test-chat-template.cpp +0 -417
- package/src/llama.cpp/tests/test-chat.cpp +0 -985
- package/src/llama.cpp/tests/test-double-float.cpp +0 -57
- package/src/llama.cpp/tests/test-gbnf-validator.cpp +0 -109
- package/src/llama.cpp/tests/test-gguf.cpp +0 -1338
- package/src/llama.cpp/tests/test-grammar-integration.cpp +0 -1308
- package/src/llama.cpp/tests/test-grammar-llguidance.cpp +0 -1201
- package/src/llama.cpp/tests/test-grammar-parser.cpp +0 -519
- package/src/llama.cpp/tests/test-json-schema-to-grammar.cpp +0 -1304
- package/src/llama.cpp/tests/test-llama-grammar.cpp +0 -408
- package/src/llama.cpp/tests/test-log.cpp +0 -39
- package/src/llama.cpp/tests/test-model-load-cancel.cpp +0 -27
- package/src/llama.cpp/tests/test-mtmd-c-api.c +0 -63
- package/src/llama.cpp/tests/test-opt.cpp +0 -904
- package/src/llama.cpp/tests/test-quantize-fns.cpp +0 -186
- package/src/llama.cpp/tests/test-quantize-perf.cpp +0 -365
- package/src/llama.cpp/tests/test-quantize-stats.cpp +0 -424
- package/src/llama.cpp/tests/test-regex-partial.cpp +0 -288
- package/src/llama.cpp/tests/test-rope.cpp +0 -262
- package/src/llama.cpp/tests/test-sampling.cpp +0 -399
- package/src/llama.cpp/tests/test-tokenizer-0.cpp +0 -312
- package/src/llama.cpp/tests/test-tokenizer-1-bpe.cpp +0 -155
- package/src/llama.cpp/tests/test-tokenizer-1-spm.cpp +0 -125
- package/src/llama.cpp/tools/CMakeLists.txt +0 -39
- package/src/llama.cpp/tools/batched-bench/CMakeLists.txt +0 -5
- package/src/llama.cpp/tools/batched-bench/batched-bench.cpp +0 -204
- package/src/llama.cpp/tools/cvector-generator/CMakeLists.txt +0 -5
- package/src/llama.cpp/tools/cvector-generator/completions.txt +0 -582
- package/src/llama.cpp/tools/cvector-generator/cvector-generator.cpp +0 -508
- package/src/llama.cpp/tools/cvector-generator/mean.hpp +0 -48
- package/src/llama.cpp/tools/cvector-generator/negative.txt +0 -4
- package/src/llama.cpp/tools/cvector-generator/pca.hpp +0 -315
- package/src/llama.cpp/tools/cvector-generator/positive.txt +0 -4
- package/src/llama.cpp/tools/export-lora/CMakeLists.txt +0 -5
- package/src/llama.cpp/tools/export-lora/export-lora.cpp +0 -434
- package/src/llama.cpp/tools/gguf-split/CMakeLists.txt +0 -5
- package/src/llama.cpp/tools/gguf-split/gguf-split.cpp +0 -583
- package/src/llama.cpp/tools/imatrix/CMakeLists.txt +0 -5
- package/src/llama.cpp/tools/imatrix/imatrix.cpp +0 -667
- package/src/llama.cpp/tools/llama-bench/CMakeLists.txt +0 -5
- package/src/llama.cpp/tools/llama-bench/llama-bench.cpp +0 -2024
- package/src/llama.cpp/tools/main/CMakeLists.txt +0 -5
- package/src/llama.cpp/tools/main/main.cpp +0 -977
- package/src/llama.cpp/tools/mtmd/CMakeLists.txt +0 -58
- package/src/llama.cpp/tools/mtmd/clip-impl.h +0 -462
- package/src/llama.cpp/tools/mtmd/clip.cpp +0 -4024
- package/src/llama.cpp/tools/mtmd/clip.h +0 -101
- package/src/llama.cpp/tools/mtmd/deprecation-warning.cpp +0 -22
- package/src/llama.cpp/tools/mtmd/miniaudio.h +0 -93468
- package/src/llama.cpp/tools/mtmd/mtmd-audio.cpp +0 -855
- package/src/llama.cpp/tools/mtmd/mtmd-audio.h +0 -62
- package/src/llama.cpp/tools/mtmd/mtmd-cli.cpp +0 -377
- package/src/llama.cpp/tools/mtmd/mtmd-helper.cpp +0 -297
- package/src/llama.cpp/tools/mtmd/mtmd.cpp +0 -942
- package/src/llama.cpp/tools/mtmd/mtmd.h +0 -362
- package/src/llama.cpp/tools/mtmd/requirements.txt +0 -5
- package/src/llama.cpp/tools/perplexity/CMakeLists.txt +0 -5
- package/src/llama.cpp/tools/perplexity/perplexity.cpp +0 -2063
- package/src/llama.cpp/tools/quantize/CMakeLists.txt +0 -6
- package/src/llama.cpp/tools/quantize/quantize.cpp +0 -519
- package/src/llama.cpp/tools/rpc/CMakeLists.txt +0 -4
- package/src/llama.cpp/tools/rpc/rpc-server.cpp +0 -322
- package/src/llama.cpp/tools/run/CMakeLists.txt +0 -16
- package/src/llama.cpp/tools/run/linenoise.cpp/linenoise.cpp +0 -1995
- package/src/llama.cpp/tools/run/linenoise.cpp/linenoise.h +0 -137
- package/src/llama.cpp/tools/run/run.cpp +0 -1261
- package/src/llama.cpp/tools/server/CMakeLists.txt +0 -51
- package/src/llama.cpp/tools/server/bench/requirements.txt +0 -2
- package/src/llama.cpp/tools/server/httplib.h +0 -10506
- package/src/llama.cpp/tools/server/server.cpp +0 -4966
- package/src/llama.cpp/tools/server/tests/requirements.txt +0 -8
- package/src/llama.cpp/tools/server/utils.hpp +0 -1337
- package/src/llama.cpp/tools/tokenize/CMakeLists.txt +0 -5
- package/src/llama.cpp/tools/tokenize/tokenize.cpp +0 -416
- package/src/llama.cpp/tools/tts/CMakeLists.txt +0 -5
- package/src/llama.cpp/tools/tts/tts.cpp +0 -1092
|
@@ -1,1338 +0,0 @@
|
|
|
1
|
-
#include "ggml.h"
|
|
2
|
-
#include "ggml-backend.h"
|
|
3
|
-
#include "../ggml/src/ggml-impl.h"
|
|
4
|
-
|
|
5
|
-
#include <algorithm>
|
|
6
|
-
#include <array>
|
|
7
|
-
#include <cstdint>
|
|
8
|
-
#include <cstdio>
|
|
9
|
-
#include <random>
|
|
10
|
-
#include <string>
|
|
11
|
-
#include <vector>
|
|
12
|
-
|
|
13
|
-
constexpr int offset_has_kv = 1000;
|
|
14
|
-
constexpr int offset_has_tensors = 2000;
|
|
15
|
-
constexpr int offset_has_data = 3000;
|
|
16
|
-
|
|
17
|
-
enum handcrafted_file_type {
|
|
18
|
-
HANDCRAFTED_HEADER_BAD_MAGIC = 10,
|
|
19
|
-
HANDCRAFTED_HEADER_BAD_VERSION_1 = 20,
|
|
20
|
-
HANDCRAFTED_HEADER_BAD_VERSION_FUTURE = 30,
|
|
21
|
-
HANDCRAFTED_HEADER_BAD_N_TENSORS = 40,
|
|
22
|
-
HANDCRAFTED_HEADER_BAD_N_KV = 50,
|
|
23
|
-
HANDCRAFTED_HEADER_EMPTY = 800,
|
|
24
|
-
|
|
25
|
-
HANDCRAFTED_KV_BAD_KEY_SIZE = 10 + offset_has_kv,
|
|
26
|
-
HANDCRAFTED_KV_BAD_TYPE = 20 + offset_has_kv,
|
|
27
|
-
// HANDCRAFTED_KV_BAD_VALUE_SIZE = 30 + offset_has_kv, // removed because it can result in allocations > 1 TB (default sanitizer limit)
|
|
28
|
-
HANDCRAFTED_KV_DUPLICATE_KEY = 40 + offset_has_kv,
|
|
29
|
-
HANDCRAFTED_KV_BAD_ALIGN = 50 + offset_has_kv,
|
|
30
|
-
HANDCRAFTED_KV_SUCCESS = 800 + offset_has_kv,
|
|
31
|
-
|
|
32
|
-
HANDCRAFTED_TENSORS_BAD_NAME_SIZE = 10 + offset_has_tensors,
|
|
33
|
-
HANDCRAFTED_TENSORS_BAD_N_DIMS = 20 + offset_has_tensors,
|
|
34
|
-
HANDCRAFTED_TENSORS_BAD_SHAPE = 30 + offset_has_tensors,
|
|
35
|
-
HANDCRAFTED_TENSORS_NE_TOO_BIG = 40 + offset_has_tensors,
|
|
36
|
-
HANDCRAFTED_TENSORS_BAD_TYPE = 50 + offset_has_tensors,
|
|
37
|
-
HANDCRAFTED_TENSORS_BAD_OFFSET = 60 + offset_has_tensors,
|
|
38
|
-
HANDCRAFTED_TENSORS_DUPLICATE_NAME = 70 + offset_has_tensors,
|
|
39
|
-
HANDCRAFTED_TENSORS_BAD_ALIGN = 75 + offset_has_tensors,
|
|
40
|
-
HANDCRAFTED_TENSORS_INCONSISTENT_ALIGN = 80 + offset_has_tensors,
|
|
41
|
-
HANDCRAFTED_TENSORS_SUCCESS = 800 + offset_has_tensors,
|
|
42
|
-
HANDCRAFTED_TENSORS_CUSTOM_ALIGN = 810 + offset_has_tensors,
|
|
43
|
-
|
|
44
|
-
HANDCRAFTED_DATA_NOT_ENOUGH_DATA = 10 + offset_has_data,
|
|
45
|
-
HANDCRAFTED_DATA_BAD_ALIGN = 15 + offset_has_data,
|
|
46
|
-
HANDCRAFTED_DATA_INCONSISTENT_ALIGN = 20 + offset_has_data,
|
|
47
|
-
HANDCRAFTED_DATA_SUCCESS = 800 + offset_has_data,
|
|
48
|
-
HANDCRAFTED_DATA_CUSTOM_ALIGN = 810 + offset_has_data,
|
|
49
|
-
};
|
|
50
|
-
|
|
51
|
-
static std::string handcrafted_file_type_name(const enum handcrafted_file_type hft) {
|
|
52
|
-
switch (hft) {
|
|
53
|
-
case HANDCRAFTED_HEADER_BAD_MAGIC: return "HEADER_BAD_MAGIC";
|
|
54
|
-
case HANDCRAFTED_HEADER_BAD_VERSION_1: return "HEADER_BAD_VERSION_1";
|
|
55
|
-
case HANDCRAFTED_HEADER_BAD_VERSION_FUTURE: return "HEADER_BAD_VERSION_FUTURE";
|
|
56
|
-
case HANDCRAFTED_HEADER_BAD_N_KV: return "HEADER_BAD_N_KV";
|
|
57
|
-
case HANDCRAFTED_HEADER_BAD_N_TENSORS: return "HEADER_BAD_N_TENSORS";
|
|
58
|
-
case HANDCRAFTED_HEADER_EMPTY: return "HEADER_EMPTY";
|
|
59
|
-
|
|
60
|
-
case HANDCRAFTED_KV_BAD_KEY_SIZE: return "KV_BAD_KEY_SIZE";
|
|
61
|
-
case HANDCRAFTED_KV_BAD_TYPE: return "KV_BAD_TYPE";
|
|
62
|
-
case HANDCRAFTED_KV_DUPLICATE_KEY: return "KV_DUPLICATE_KEY";
|
|
63
|
-
case HANDCRAFTED_KV_BAD_ALIGN: return "KV_BAD_ALIGN";
|
|
64
|
-
case HANDCRAFTED_KV_SUCCESS: return "KV_RANDOM_KV";
|
|
65
|
-
|
|
66
|
-
case HANDCRAFTED_TENSORS_BAD_NAME_SIZE: return "TENSORS_BAD_NAME_SIZE";
|
|
67
|
-
case HANDCRAFTED_TENSORS_BAD_N_DIMS: return "TENSORS_BAD_N_DIMS";
|
|
68
|
-
case HANDCRAFTED_TENSORS_BAD_SHAPE: return "TENSORS_BAD_SHAPE";
|
|
69
|
-
case HANDCRAFTED_TENSORS_NE_TOO_BIG: return "TENSORS_NE_TOO_BIG";
|
|
70
|
-
case HANDCRAFTED_TENSORS_BAD_TYPE: return "TENSORS_BAD_TYPE";
|
|
71
|
-
case HANDCRAFTED_TENSORS_BAD_OFFSET: return "TENSORS_BAD_OFFSET";
|
|
72
|
-
case HANDCRAFTED_TENSORS_DUPLICATE_NAME: return "TENSORS_DUPLICATE_NAME";
|
|
73
|
-
case HANDCRAFTED_TENSORS_BAD_ALIGN: return "TENSORS_BAD_ALIGN";
|
|
74
|
-
case HANDCRAFTED_TENSORS_INCONSISTENT_ALIGN: return "TENSORS_INCONSISTENT_ALIGN";
|
|
75
|
-
case HANDCRAFTED_TENSORS_SUCCESS: return "TENSORS_SUCCESS";
|
|
76
|
-
case HANDCRAFTED_TENSORS_CUSTOM_ALIGN: return "TENSORS_CUSTOM_ALIGN";
|
|
77
|
-
|
|
78
|
-
case HANDCRAFTED_DATA_NOT_ENOUGH_DATA: return "DATA_NOT_ENOUGH_DATA";
|
|
79
|
-
case HANDCRAFTED_DATA_BAD_ALIGN: return "DATA_BAD_ALIGN";
|
|
80
|
-
case HANDCRAFTED_DATA_INCONSISTENT_ALIGN: return "DATA_INCONSISTENT_ALIGN";
|
|
81
|
-
case HANDCRAFTED_DATA_SUCCESS: return "DATA_SUCCESS";
|
|
82
|
-
case HANDCRAFTED_DATA_CUSTOM_ALIGN: return "DATA_CUSTOM_ALIGN";
|
|
83
|
-
}
|
|
84
|
-
GGML_ABORT("fatal error");
|
|
85
|
-
}
|
|
86
|
-
|
|
87
|
-
static bool expect_context_not_null(const enum handcrafted_file_type hft) {
|
|
88
|
-
if (hft < offset_has_kv) {
|
|
89
|
-
return hft >= HANDCRAFTED_HEADER_EMPTY;
|
|
90
|
-
}
|
|
91
|
-
if (hft < offset_has_tensors) {
|
|
92
|
-
return hft >= HANDCRAFTED_KV_SUCCESS;
|
|
93
|
-
}
|
|
94
|
-
if (hft < offset_has_data) {
|
|
95
|
-
return hft >= HANDCRAFTED_TENSORS_SUCCESS;
|
|
96
|
-
}
|
|
97
|
-
return hft >= HANDCRAFTED_DATA_SUCCESS;
|
|
98
|
-
}
|
|
99
|
-
|
|
100
|
-
typedef std::pair<enum ggml_type, std::array<int64_t, GGML_MAX_DIMS>> tensor_config_t;
|
|
101
|
-
|
|
102
|
-
static std::vector<tensor_config_t> get_tensor_configs(std::mt19937 & rng) {
|
|
103
|
-
std::vector<tensor_config_t> tensor_configs;
|
|
104
|
-
tensor_configs.reserve(100);
|
|
105
|
-
|
|
106
|
-
for (int i = 0; i < 100; ++i) {
|
|
107
|
-
const enum ggml_type type = ggml_type(rng() % GGML_TYPE_COUNT);
|
|
108
|
-
if (ggml_type_size(type) == 0) {
|
|
109
|
-
continue;
|
|
110
|
-
}
|
|
111
|
-
|
|
112
|
-
std::array<int64_t, GGML_MAX_DIMS> shape = {1, 1, 1, 1};
|
|
113
|
-
shape[0] = (1 + rng() % 10) * ggml_blck_size(type);
|
|
114
|
-
const int n_dims = 1 + rng() % GGML_MAX_DIMS;
|
|
115
|
-
for (int i = 1; i < n_dims; ++i) {
|
|
116
|
-
shape[i] = 1 + rng() % 10;
|
|
117
|
-
}
|
|
118
|
-
|
|
119
|
-
tensor_configs.push_back(std::make_pair(type, shape));
|
|
120
|
-
}
|
|
121
|
-
|
|
122
|
-
return tensor_configs;
|
|
123
|
-
}
|
|
124
|
-
|
|
125
|
-
static std::vector<std::pair<enum gguf_type, enum gguf_type>> get_kv_types(std::mt19937 rng) {
|
|
126
|
-
std::vector<std::pair<enum gguf_type, enum gguf_type>> kv_types;
|
|
127
|
-
kv_types.reserve(100);
|
|
128
|
-
|
|
129
|
-
for (int i = 0; i < 100; ++i) {
|
|
130
|
-
const gguf_type type = gguf_type(rng() % GGUF_TYPE_COUNT);
|
|
131
|
-
|
|
132
|
-
if (type == GGUF_TYPE_ARRAY) {
|
|
133
|
-
const gguf_type type_arr = gguf_type(rng() % GGUF_TYPE_COUNT);
|
|
134
|
-
if (type_arr == GGUF_TYPE_ARRAY) {
|
|
135
|
-
continue;
|
|
136
|
-
}
|
|
137
|
-
kv_types.push_back(std::make_pair(type, type_arr));
|
|
138
|
-
continue;
|
|
139
|
-
}
|
|
140
|
-
|
|
141
|
-
kv_types.push_back(std::make_pair(type, gguf_type(-1)));
|
|
142
|
-
}
|
|
143
|
-
std::shuffle(kv_types.begin(), kv_types.end(), rng);
|
|
144
|
-
|
|
145
|
-
return kv_types;
|
|
146
|
-
}
|
|
147
|
-
|
|
148
|
-
template <typename T>
|
|
149
|
-
static void helper_write(FILE * file, const T & val) {
|
|
150
|
-
GGML_ASSERT(fwrite(&val, 1, sizeof(val), file) == sizeof(val));
|
|
151
|
-
}
|
|
152
|
-
|
|
153
|
-
static void helper_write(FILE * file, const void * data, const size_t nbytes) {
|
|
154
|
-
GGML_ASSERT(fwrite(data, 1, nbytes, file) == nbytes);
|
|
155
|
-
}
|
|
156
|
-
|
|
157
|
-
// Hand-writes a GGUF file byte-by-byte into a tmpfile(), deliberately
// corrupting exactly the part selected by `hft` (bad magic, bad version,
// bad sizes, bad alignment, ...). The success variants produce a valid file.
// `seed` drives the same RNG sequence that the handcrafted_check_* functions
// replay, so writer and checkers agree on the expected content.
// `extra_bytes` appends trailing padding past the logical end of the file.
// Returns the rewound FILE*, or nullptr if tmpfile() failed (Windows without
// elevated privileges).
// NOTE: the exact order of helper_write()/rng() calls below *is* the file
// format — do not reorder.
static FILE * get_handcrafted_file(const unsigned int seed, const enum handcrafted_file_type hft, const int extra_bytes = 0) {
    FILE * file = tmpfile();

    if (!file) {
        return file;
    }

    std::mt19937 rng(seed);
    uint32_t alignment = GGUF_DEFAULT_ALIGNMENT;

    // --- header: magic ---
    if (hft == HANDCRAFTED_HEADER_BAD_MAGIC) {
        const char bad_magic[4] = {'F', 'U', 'G', 'G'}; // "GGUF" reversed
        helper_write(file, bad_magic, sizeof(bad_magic));
    } else {
        helper_write(file, GGUF_MAGIC, 4);
    }

    // --- header: version (v1 is unsupported; future version must be rejected) ---
    if (hft == HANDCRAFTED_HEADER_BAD_VERSION_1) {
        const uint32_t version = 1;
        helper_write(file, version);
    } else if (hft == HANDCRAFTED_HEADER_BAD_VERSION_FUTURE) {
        const uint32_t version = GGUF_VERSION + 1;
        helper_write(file, version);
    } else {
        const uint32_t version = GGUF_VERSION;
        helper_write(file, version);
    }

    std::vector<tensor_config_t> tensor_configs;
    if (hft >= offset_has_tensors) {
        tensor_configs = get_tensor_configs(rng);
    }

    // --- header: tensor count (-1 wraps to UINT64_MAX for the bad case) ---
    if (hft == HANDCRAFTED_HEADER_BAD_N_TENSORS) {
        const uint64_t n_tensors = -1;
        helper_write(file, n_tensors);
    } else {
        const uint64_t n_tensors = tensor_configs.size();
        helper_write(file, n_tensors);
    }

    std::vector<std::pair<enum gguf_type, enum gguf_type>> kv_types;
    if (hft >= offset_has_kv) {
        kv_types = get_kv_types(rng);
    }
    {
        // --- header: KV count; the *_ALIGN variants append one extra KV pair
        //     (general.alignment) below, so account for it here ---
        uint64_t n_kv = kv_types.size();
        if (hft == HANDCRAFTED_KV_BAD_ALIGN ||
            hft == HANDCRAFTED_TENSORS_BAD_ALIGN || hft == HANDCRAFTED_TENSORS_CUSTOM_ALIGN ||
            hft == HANDCRAFTED_DATA_BAD_ALIGN || hft == HANDCRAFTED_DATA_CUSTOM_ALIGN) {

            n_kv += 1;
        } else if (hft == HANDCRAFTED_HEADER_BAD_N_KV) {
            n_kv = -1;
        }
        helper_write(file, n_kv);
    }

    // Header-only variants stop here: pad to alignment, add extra bytes, done.
    if (hft < offset_has_kv) {
        while (ftell(file) % alignment != 0) {
            const char pad = 0;
            helper_write(file, pad);
        }

        for (int i = 0; i < extra_bytes; ++i) {
            const char tmp = 0;
            helper_write(file, tmp);
        }
        rewind(file);
        return file;
    }

    // --- KV section: one key/value per entry from get_kv_types() ---
    for (int i = 0; i < int(kv_types.size()); ++i) {
        const enum gguf_type type = gguf_type(hft == HANDCRAFTED_KV_BAD_TYPE ? GGUF_TYPE_COUNT : kv_types[i].first);
        const enum gguf_type type_arr = gguf_type(hft == HANDCRAFTED_KV_BAD_TYPE ? GGUF_TYPE_COUNT : kv_types[i].second);

        // DUPLICATE_KEY: i/2 makes consecutive entries share a key.
        const std::string key = "my_key_" + std::to_string((hft == HANDCRAFTED_KV_DUPLICATE_KEY ? i/2 : i));

        if (hft == HANDCRAFTED_KV_BAD_KEY_SIZE) {
            const uint64_t n = -1;
            helper_write(file, n);
        } else {
            const uint64_t n = key.length();
            helper_write(file, n);
        }
        helper_write(file, key.data(), key.length());

        {
            const int32_t type32 = int32_t(type);
            helper_write(file, type32);
        }

        // Random payload; the checkers regenerate the same values from `seed`.
        uint32_t data[16];
        for (int j = 0; j < 16; ++j) {
            data[j] = rng();
            if (type == GGUF_TYPE_STRING || type_arr == GGUF_TYPE_STRING) {
                data[j] |= 0x01010101; // avoid random null-termination of string
            }
        }

        if (type == GGUF_TYPE_STRING) {
            const uint64_t n = rng() % sizeof(data);
            helper_write(file, n);
            helper_write(file, data, n);
            continue;
        }

        if (type == GGUF_TYPE_ARRAY) {
            {
                const int32_t type32 = int32_t(type_arr);
                helper_write(file, type32);
            }
            if (type_arr == GGUF_TYPE_STRING) {
                // Up to 16 short strings, each at most sizeof(uint32_t) bytes
                // so one data[] word backs each string.
                const uint64_t nstr = rng() % (16 + 1);
                helper_write(file, nstr);
                for (uint64_t istr = 0; istr < nstr; ++istr) {
                    const uint64_t n = rng() % (sizeof(uint32_t) + 1);
                    helper_write(file, n);
                    helper_write(file, &data[istr], n);
                }
                continue;
            }
            const size_t type_size = gguf_type_size(type_arr);
            const uint64_t n = (rng() % sizeof(data)) / type_size;
            helper_write(file, n);
            helper_write(file, &data, n*type_size);
            continue;
        }

        // Scalar value; for the BAD_TYPE case the size is unknowable, write 1 byte.
        helper_write(file, data, hft == HANDCRAFTED_KV_BAD_TYPE ? 1 : gguf_type_size(type));
    }

    // --- optional general.alignment KV pair (valid value 1, or invalid 13) ---
    if (hft == HANDCRAFTED_KV_BAD_ALIGN ||
        hft == HANDCRAFTED_TENSORS_BAD_ALIGN || hft == HANDCRAFTED_TENSORS_CUSTOM_ALIGN ||
        hft == HANDCRAFTED_DATA_BAD_ALIGN || hft == HANDCRAFTED_DATA_CUSTOM_ALIGN) {

        const uint64_t n = strlen(GGUF_KEY_GENERAL_ALIGNMENT);
        helper_write(file, n);
        helper_write(file, GGUF_KEY_GENERAL_ALIGNMENT, n);

        const int32_t type = gguf_type(GGUF_TYPE_UINT32);
        helper_write(file, type);

        // 13 is not a power of two -> loader must reject it (BAD_ALIGN cases).
        alignment = expect_context_not_null(hft) ? 1 : 13;
        helper_write(file, alignment);
    }

    // KV-only variants stop here.
    if (hft < offset_has_tensors) {
        while (ftell(file) % alignment != 0) {
            const char pad = 0;
            helper_write(file, pad);
        }

        for (int i = 0; i < extra_bytes; ++i) {
            const char tmp = 0;
            helper_write(file, tmp);
        }
        rewind(file);
        return file;
    }

    // INCONSISTENT_ALIGN: tensor offsets computed with alignment 1 while the
    // file declares no (or a different) alignment — loader must catch this.
    if (hft == HANDCRAFTED_TENSORS_INCONSISTENT_ALIGN || hft == HANDCRAFTED_DATA_INCONSISTENT_ALIGN) {
        alignment = 1;
    }

    // --- tensor info section ---
    uint64_t offset = 0;
    for (int i = 0; i < int(tensor_configs.size()); ++i) {
        const ggml_type type = tensor_configs[i].first;
        const std::array<int64_t, GGML_MAX_DIMS> shape = tensor_configs[i].second;

        std::string name = "my_tensor";
        if (hft != HANDCRAFTED_TENSORS_DUPLICATE_NAME) {
            name += "_" + std::to_string(i);
        }
        if (hft == HANDCRAFTED_TENSORS_BAD_NAME_SIZE) {
            name += "_with_a_very_long_name_which_is_longer_than_what_is_allowed_for_ggml_tensors";
            GGML_ASSERT(name.length() >= GGML_MAX_NAME);
        }
        {
            const uint64_t n = name.length();
            helper_write(file, n);
        }
        helper_write(file, name.data(), name.length());

        // Effective dimensionality: index of the last non-1 extent, plus one.
        // NE_TOO_BIG forces at least 2 dims so the oversized extents multiply.
        uint32_t n_dims = hft == HANDCRAFTED_TENSORS_NE_TOO_BIG ? 2 : 1;
        for (int i = GGML_MAX_DIMS-1; i >= 1; --i) {
            if (shape[i] != 1) {
                n_dims = i + 1;
                break;
            }
        }
        if (hft == HANDCRAFTED_TENSORS_BAD_N_DIMS) {
            const uint32_t n_dims_bad = GGML_MAX_DIMS + 1;
            helper_write(file, n_dims_bad);
        } else {
            helper_write(file, n_dims);
        }

        if (hft == HANDCRAFTED_TENSORS_BAD_SHAPE) {
            for (uint32_t j = 0; j < n_dims; ++j) {
                const int64_t bad_dim = -1;
                helper_write(file, bad_dim);
            }
        } else if (hft == HANDCRAFTED_TENSORS_NE_TOO_BIG){
            for (uint32_t j = 0; j < n_dims; ++j) {
                // 2 dims of 4*INT32_MAX each overflow the total element count.
                const int64_t big_dim = 4*int64_t(INT32_MAX);
                helper_write(file, big_dim);
            }
        } else {
            helper_write(file, shape.data(), n_dims*sizeof(int64_t));
        }

        {
            const int32_t type32 = hft == HANDCRAFTED_TENSORS_BAD_TYPE ? GGML_TYPE_COUNT : int32_t(type);
            helper_write(file, type32);
        }

        if (hft == HANDCRAFTED_TENSORS_BAD_OFFSET) {
            const uint64_t bad_offset = -1;
            helper_write(file, bad_offset);
        } else {
            helper_write(file, offset);
        }

        // Advance the running data offset by this tensor's padded row size.
        int64_t ne = shape[0];
        for (uint32_t i = 1; i < n_dims; ++i) {
            ne *= shape[i];
        }
        offset += GGML_PAD(ggml_row_size(type, ne), alignment);
    }

    // Pad the info section up to the data alignment boundary.
    while (ftell(file) % alignment != 0) {
        const char pad = 0;
        helper_write(file, pad);
    }

    // --- tensor data section: deterministic byte pattern (i % 256) so the
    //     checker can verify content without storing it ---
    if (hft >= offset_has_data) {
        rng.seed(seed + 1);
        uint64_t nbytes = offset;
        if (hft == HANDCRAFTED_DATA_NOT_ENOUGH_DATA) {
            nbytes -= 1; // truncate by one byte -> loader must fail
        }
        for (uint64_t i = 0; i < nbytes; ++i) {
            const uint8_t random_byte = i % 256;
            helper_write(file, random_byte);
        }
    }

    for (int i = 0; i < extra_bytes; ++i) {
        const char tmp = 0;
        helper_write(file, tmp);
    }
    rewind(file);
    return file;
}
|
|
412
|
-
|
|
413
|
-
static bool handcrafted_check_header(const gguf_context * gguf_ctx, const unsigned int seed, const bool has_kv, const bool has_tensors, const bool alignment_defined) {
|
|
414
|
-
if (!gguf_ctx) {
|
|
415
|
-
return false;
|
|
416
|
-
}
|
|
417
|
-
|
|
418
|
-
std::mt19937 rng(seed);
|
|
419
|
-
|
|
420
|
-
std::vector<tensor_config_t> tensor_configs;
|
|
421
|
-
if (has_tensors) {
|
|
422
|
-
tensor_configs = get_tensor_configs(rng);
|
|
423
|
-
}
|
|
424
|
-
std::vector<std::pair<enum gguf_type, enum gguf_type>> kv_types;
|
|
425
|
-
if (has_kv) {
|
|
426
|
-
kv_types = get_kv_types(rng);
|
|
427
|
-
}
|
|
428
|
-
|
|
429
|
-
bool ok = true;
|
|
430
|
-
|
|
431
|
-
if (gguf_get_version(gguf_ctx) != GGUF_VERSION) {
|
|
432
|
-
ok = false;
|
|
433
|
-
}
|
|
434
|
-
if (gguf_get_n_tensors(gguf_ctx) != int(tensor_configs.size())) {
|
|
435
|
-
ok = false;
|
|
436
|
-
}
|
|
437
|
-
if (gguf_get_n_kv(gguf_ctx) != int(alignment_defined ? kv_types.size() + 1 : kv_types.size())) {
|
|
438
|
-
ok = false;
|
|
439
|
-
}
|
|
440
|
-
|
|
441
|
-
return ok;
|
|
442
|
-
}
|
|
443
|
-
|
|
444
|
-
// Verify every KV pair of a parsed handcrafted file against the values the
// writer generated. Re-seeds the same RNG and replays the writer's draw
// sequence exactly — each rng() call below must stay in lockstep with
// get_handcrafted_file(), otherwise expected and actual values desynchronize.
static bool handcrafted_check_kv(const gguf_context * gguf_ctx, const unsigned int seed, const bool has_tensors, const bool alignment_defined) {
    if (!gguf_ctx) {
        return false;
    }

    std::mt19937 rng(seed);

    // Consume the same RNG state the writer used for tensor configs.
    std::vector<tensor_config_t> tensor_configs;
    if (has_tensors) {
        tensor_configs = get_tensor_configs(rng);
    }

    std::vector<std::pair<enum gguf_type, enum gguf_type>> kv_types = get_kv_types(rng);

    bool ok = true;

    for (int i = 0; i < int(kv_types.size()); ++i) {
        const enum gguf_type type = gguf_type(kv_types[i].first);
        const enum gguf_type type_arr = gguf_type(kv_types[i].second);

        const std::string key = "my_key_" + std::to_string(i);

        // Regenerate the exact payload the writer produced for this entry.
        uint32_t data[16];
        for (int j = 0; j < 16; ++j) {
            data[j] = rng();
            if (type == GGUF_TYPE_STRING || type_arr == GGUF_TYPE_STRING) {
                data[j] |= 0x01010101; // avoid random null-termination of string
            }
        }

        const char * data8 = reinterpret_cast<const char *>(data);
        const int id = gguf_find_key(gguf_ctx, key.c_str());

        if (type == GGUF_TYPE_STRING) {
            const char * str = gguf_get_val_str(gguf_ctx, id);
            const uint64_t n = strlen(str);
            const uint64_t n_expected = rng() % sizeof(data);
            if (n != n_expected) {
                ok = false;
                continue;
            }
            if (!std::equal(str, str + n, data8)) {
                ok = false;
            }
            continue;
        }

        if (type == GGUF_TYPE_ARRAY) {
            const size_t type_size = gguf_type_size(type_arr);
            const uint64_t arr_n = gguf_get_arr_n(gguf_ctx, id);

            if (type_arr == GGUF_TYPE_STRING) {
                // Mirror the writer: draw the string count, then per string
                // draw its length (so the RNG stays in step even on mismatch).
                const uint64_t nstr_expected = rng() % (16 + 1);
                if (arr_n != nstr_expected) {
                    ok = false;
                    continue;
                }
                for (uint64_t istr = 0; istr < nstr_expected; ++istr) {
                    const char * str = gguf_get_arr_str(gguf_ctx, id, istr);
                    const uint64_t n = strlen(str);
                    const uint64_t n_expected = rng() % (sizeof(uint32_t) + 1);

                    if (n != n_expected) {
                        ok = false;
                        continue;
                    }
                    const char * str_expected = reinterpret_cast<const char *>(&data[istr]);
                    if (strncmp(str, str_expected, n) != 0) {
                        ok = false;
                        continue;
                    }
                }
                continue;
            }

            const uint64_t arr_n_expected = (rng() % sizeof(data)) / type_size;
            if (arr_n != arr_n_expected) {
                ok = false;
                continue;
            }

            const char * data_gguf = reinterpret_cast<const char *>(gguf_get_arr_data(gguf_ctx, id));

            // Booleans are compared by truthiness, not raw bytes, since the
            // in-memory representation may normalize nonzero values.
            if (type_arr == GGUF_TYPE_BOOL) {
                for (size_t arr_i = 0; arr_i < arr_n; ++arr_i) {
                    if (bool(data8[arr_i]) != bool(data_gguf[arr_i])) {
                        ok = false;
                    }
                }
                continue;
            }

            if (!std::equal(data8, data8 + arr_n*type_size, data_gguf)) {
                ok = false;
            }
            continue;
        }

        // Scalar value: byte-compare against the regenerated payload.
        const char * data_gguf = reinterpret_cast<const char *>(gguf_get_val_data(gguf_ctx, id));

        if (type == GGUF_TYPE_BOOL) {
            if (bool(*data8) != bool(*data_gguf)) {
                ok = false;
            }
            continue;
        }

        if (!std::equal(data8, data8 + gguf_type_size(type), data_gguf)) {
            ok = false;
        }
    }

    // Files that define general.alignment always wrote the value 1 for the
    // success variants; otherwise the default must be in effect.
    const uint32_t expected_alignment = alignment_defined ? 1 : GGUF_DEFAULT_ALIGNMENT;
    if (gguf_get_alignment(gguf_ctx) != expected_alignment) {
        ok = false;
    }

    return ok;
}
|
|
563
|
-
|
|
564
|
-
// Verify the tensor metadata (name, type, offset) of a parsed handcrafted
// file against the configs regenerated from `seed`.
static bool handcrafted_check_tensors(const gguf_context * gguf_ctx, const unsigned int seed) {
    if (!gguf_ctx) {
        return false;
    }

    std::mt19937 rng(seed);

    std::vector<tensor_config_t> tensor_configs = get_tensor_configs(rng);

    // Call get_kv_types to get the same RNG state:
    get_kv_types(rng);

    bool ok = true;

    // Use the file's declared alignment (if any) when recomputing offsets.
    const int id_alignment = gguf_find_key(gguf_ctx, GGUF_KEY_GENERAL_ALIGNMENT);
    const uint32_t alignment = id_alignment >= 0 ? gguf_get_val_u32(gguf_ctx, id_alignment) : GGUF_DEFAULT_ALIGNMENT;

    uint64_t expected_offset = 0;
    for (int i = 0; i < int(tensor_configs.size()); ++i) {
        const ggml_type type = tensor_configs[i].first;
        const std::array<int64_t, GGML_MAX_DIMS> shape = tensor_configs[i].second;

        const std::string name = "my_tensor_" + std::to_string(i);
        const int id = gguf_find_tensor(gguf_ctx, name.c_str());

        if (id >= 0) {
            if (std::string(gguf_get_tensor_name(gguf_ctx, id)) != name) {
                ok = false;
            }

            if (gguf_get_tensor_type(gguf_ctx, id) != type) {
                ok = false;
            }
        } else {
            // Tensor missing entirely; offset accounting for later tensors is
            // skipped (they would all mismatch anyway).
            ok = false;
            continue;
        }

        const size_t offset = gguf_get_tensor_offset(gguf_ctx, id);

        if (offset != expected_offset) {
            ok = false;
        }

        // Same padded-row-size accumulation the writer used.
        int64_t ne = shape[0];
        for (size_t j = 1; j < GGML_MAX_DIMS; ++j) {
            ne *= shape[j];
        }
        expected_offset += GGML_PAD(ggml_row_size(type, ne), alignment);
    }

    return ok;
}
|
|
617
|
-
|
|
618
|
-
// Verify the raw tensor data bytes by reading them back from `file` at the
// offsets recorded in `gguf_ctx` and comparing against the deterministic
// (offset % 256) pattern the writer emitted.
static bool handcrafted_check_tensor_data(const gguf_context * gguf_ctx, const unsigned int seed, FILE * file) {
    if (!gguf_ctx) {
        return false;
    }

    std::mt19937 rng(seed);

    std::vector<tensor_config_t> tensor_configs = get_tensor_configs(rng);

    bool ok = true;

    for (int i = 0; i < int(tensor_configs.size()); ++i) {
        const ggml_type type = tensor_configs[i].first;
        const std::array<int64_t, GGML_MAX_DIMS> shape = tensor_configs[i].second;

        int64_t ne = shape[0];
        for (size_t j = 1; j < GGML_MAX_DIMS; ++j) {
            ne *= shape[j];
        }
        const size_t size = ggml_row_size(type, ne);

        const std::string name = "my_tensor_" + std::to_string(i);
        const size_t offset = gguf_get_tensor_offset(gguf_ctx, gguf_find_tensor(gguf_ctx, name.c_str()));

        // Read this tensor's bytes directly from the file at its recorded offset.
        std::vector<uint8_t> data(size);
        GGML_ASSERT(fseek(file, gguf_get_data_offset(gguf_ctx) + offset, SEEK_SET) == 0);
        GGML_ASSERT(fread(data.data(), 1, data.size(), file) == data.size());

        // Writer emitted byte (global_index % 256); global index = j + offset.
        for (size_t j = 0; j < size; ++j) {
            const uint8_t expected_byte = (j + offset) % 256;
            if (data[j] != expected_byte) {
                ok = false;
            }
        }
    }

    return ok;
}
|
|
656
|
-
|
|
657
|
-
// Driver: for every handcrafted file variant, build the file, attempt to
// parse it, and check that (a) parsing succeeds/fails as expected and (b) on
// success the header, KV pairs, tensor metadata, and tensor data all match
// what the writer produced. Returns {tests passed, tests run}.
static std::pair<int, int> test_handcrafted_file(const unsigned int seed) {
    int npass = 0;
    int ntest = 0;

    // Ordered by section: header-only, +KV, +tensor info, +tensor data.
    const std::vector<handcrafted_file_type> hfts = {
        HANDCRAFTED_HEADER_BAD_MAGIC,
        HANDCRAFTED_HEADER_BAD_VERSION_1,
        HANDCRAFTED_HEADER_BAD_VERSION_FUTURE,
        HANDCRAFTED_HEADER_BAD_N_KV,
        HANDCRAFTED_HEADER_BAD_N_TENSORS,
        HANDCRAFTED_HEADER_EMPTY,

        HANDCRAFTED_KV_BAD_KEY_SIZE,
        HANDCRAFTED_KV_BAD_TYPE,
        HANDCRAFTED_KV_DUPLICATE_KEY,
        HANDCRAFTED_KV_BAD_ALIGN,
        HANDCRAFTED_KV_SUCCESS,

        HANDCRAFTED_TENSORS_BAD_NAME_SIZE,
        HANDCRAFTED_TENSORS_BAD_N_DIMS,
        HANDCRAFTED_TENSORS_BAD_SHAPE,
        HANDCRAFTED_TENSORS_NE_TOO_BIG,
        HANDCRAFTED_TENSORS_BAD_TYPE,
        HANDCRAFTED_TENSORS_BAD_OFFSET,
        HANDCRAFTED_TENSORS_DUPLICATE_NAME,
        HANDCRAFTED_TENSORS_BAD_ALIGN,
        HANDCRAFTED_TENSORS_INCONSISTENT_ALIGN,
        HANDCRAFTED_TENSORS_SUCCESS,
        HANDCRAFTED_TENSORS_CUSTOM_ALIGN,

        HANDCRAFTED_DATA_NOT_ENOUGH_DATA,
        HANDCRAFTED_DATA_BAD_ALIGN,
        HANDCRAFTED_DATA_INCONSISTENT_ALIGN,
        HANDCRAFTED_DATA_SUCCESS,
        HANDCRAFTED_DATA_CUSTOM_ALIGN,
    };

    for (enum handcrafted_file_type hft : hfts) {
        printf("%s: handcrafted_file_type=%s\n", __func__, handcrafted_file_type_name(hft).c_str());
        FILE * file = get_handcrafted_file(seed, hft);

#ifdef _WIN32
        // tmpfile() can fail on Windows without elevated privileges; treat as
        // a skip, not a failure.
        if (!file) {
            printf("failed to create tmpfile(), needs elevated privileges on Windows");
            printf("skipping tests");
            continue;
        }
#else
        GGML_ASSERT(file);
#endif // _WIN32

        // Only request a ggml_context for variants that actually carry data.
        struct ggml_context * ctx = nullptr;
        struct gguf_init_params gguf_params = {
            /*no_alloc =*/ false,
            /*ctx =*/ hft >= offset_has_data ? &ctx : nullptr,
        };

        struct gguf_context * gguf_ctx = gguf_init_from_file_impl(file, gguf_params);

        // Test 1: parse succeeded exactly when it was supposed to.
        if (expect_context_not_null(hft)) {
            printf("%s: - context_not_null: ", __func__);
        } else {
            printf("%s: - context_null: ", __func__);
        }
        if (bool(gguf_ctx) == expect_context_not_null(hft)) {
            printf("\033[1;32mOK\033[0m\n");
            npass++;
        } else {
            printf("\033[1;31mFAIL\033[0m\n");
        }
        ntest++;

        // Test 2: a failed parse must not leave a dangling ggml context.
        if (hft >= offset_has_data && !expect_context_not_null(hft)) {
            printf("%s: - no_dangling_ggml_context_pointer: ", __func__);
            if (ctx) {
                printf("\033[1;31mFAIL\033[0m\n");
            } else {
                printf("\033[1;32mOK\033[0m\n");
                npass++;
            }
            ntest++;
        }

        const bool alignment_defined = hft == HANDCRAFTED_TENSORS_CUSTOM_ALIGN || hft == HANDCRAFTED_DATA_CUSTOM_ALIGN;

        // Tests 3-6: content checks, gated on the sections the variant has.
        if (expect_context_not_null(hft)) {
            printf("%s: - check_header: ", __func__);
            if (handcrafted_check_header(gguf_ctx, seed, hft >= offset_has_kv, hft >= offset_has_tensors, alignment_defined)) {
                printf("\033[1;32mOK\033[0m\n");
                npass++;
            } else {
                printf("\033[1;31mFAIL\033[0m\n");
            }
            ntest++;
        }

        if (expect_context_not_null(hft) && hft >= offset_has_kv) {
            printf("%s: - check_kv: ", __func__);
            if (handcrafted_check_kv(gguf_ctx, seed, hft >= offset_has_tensors, alignment_defined)) {
                printf("\033[1;32mOK\033[0m\n");
                npass++;
            } else {
                printf("\033[1;31mFAIL\033[0m\n");
            }
            ntest++;
        }

        if (expect_context_not_null(hft) && hft >= offset_has_tensors) {
            printf("%s: - check_tensors: ", __func__);
            if (handcrafted_check_tensors(gguf_ctx, seed)) {
                printf("\033[1;32mOK\033[0m\n");
                npass++;
            } else {
                printf("\033[1;31mFAIL\033[0m\n");
            }
            ntest++;
        }

        if (expect_context_not_null(hft) && hft >= offset_has_data) {
            printf("%s: - check_tensor_data: ", __func__);
            if (handcrafted_check_tensor_data(gguf_ctx, seed, file)) {
                printf("\033[1;32mOK\033[0m\n");
                npass++;
            } else {
                printf("\033[1;31mFAIL\033[0m\n");
            }
            ntest++;
        }

        fclose(file);
        if (gguf_ctx) {
            ggml_free(ctx);
            gguf_free(gguf_ctx);
        }
        printf("\n");
    }


    return std::make_pair(npass, ntest);
}
|
|
797
|
-
|
|
798
|
-
// Bundles the three objects produced by get_random_gguf_context() so the
// caller can use and later free them together.
struct random_gguf_context_result {
    struct gguf_context * gguf_ctx; // randomized KV pairs + tensor table
    struct ggml_context * ctx;      // holds the tensor descriptors
    ggml_backend_buffer_t buffer;   // backend memory backing the tensor data
};
|
|
803
|
-
|
|
804
|
-
// Build a GGUF context filled with randomized KV pairs (256 draws over
// possibly-colliding keys, covering every scalar, string, and array type)
// plus up to 256 random tensors allocated on `backend` and filled with random
// bytes. Used as the source side of the round-trip tests.
// Returns the gguf context, the ggml context owning the tensor descriptors,
// and the backend buffer owning the tensor data; the caller frees all three.
static struct random_gguf_context_result get_random_gguf_context(ggml_backend_t backend, const unsigned int seed) {
    std::mt19937 rng(seed);

    struct gguf_context * gguf_ctx = gguf_init_empty();

    for (int i = 0; i < 256; ++i) {
        // Keys are drawn from a pool of 1024, so repeats (overwrites) occur.
        const std::string key = "my_key_" + std::to_string(rng() % 1024);
        const enum gguf_type type = gguf_type(rng() % GGUF_TYPE_COUNT);

        switch (type) {
            case GGUF_TYPE_UINT8:   gguf_set_val_u8  (gguf_ctx, key.c_str(), rng() % (1 << 7)); break;
            case GGUF_TYPE_INT8:    gguf_set_val_i8  (gguf_ctx, key.c_str(), rng() % (1 << 7) - (1 << 6)); break;
            case GGUF_TYPE_UINT16:  gguf_set_val_u16 (gguf_ctx, key.c_str(), rng() % (1 << 15)); break;
            case GGUF_TYPE_INT16:   gguf_set_val_i16 (gguf_ctx, key.c_str(), rng() % (1 << 15) - (1 << 14)); break;
            case GGUF_TYPE_UINT32:  gguf_set_val_u32 (gguf_ctx, key.c_str(), rng()); break;
            case GGUF_TYPE_INT32:   gguf_set_val_i32 (gguf_ctx, key.c_str(), rng() - (1 << 30)); break;
            case GGUF_TYPE_FLOAT32: gguf_set_val_f32 (gguf_ctx, key.c_str(), rng() % 1024 - 512); break;
            case GGUF_TYPE_BOOL:    gguf_set_val_bool(gguf_ctx, key.c_str(), rng() % 2 == 0); break;
            case GGUF_TYPE_STRING:  gguf_set_val_str (gguf_ctx, key.c_str(), std::to_string(rng()).c_str()); break;
            case GGUF_TYPE_UINT64:  gguf_set_val_u64 (gguf_ctx, key.c_str(), rng()); break;
            case GGUF_TYPE_INT64:   gguf_set_val_i64 (gguf_ctx, key.c_str(), rng() - (1 << 30)); break;
            // BUGFIX: previously called gguf_set_val_f32 here, which stored the
            // value as FLOAT32 and silently removed FLOAT64 round-trip coverage.
            case GGUF_TYPE_FLOAT64: gguf_set_val_f64 (gguf_ctx, key.c_str(), rng() % 1024 - 512); break;
            case GGUF_TYPE_ARRAY: {
                const enum gguf_type type_arr = gguf_type(rng() % GGUF_TYPE_COUNT);
                const uint64_t ne = rng() % 1024;

                switch (type_arr) {
                    case GGUF_TYPE_UINT8:
                    case GGUF_TYPE_INT8:
                    case GGUF_TYPE_UINT16:
                    case GGUF_TYPE_INT16:
                    case GGUF_TYPE_UINT32:
                    case GGUF_TYPE_INT32:
                    case GGUF_TYPE_FLOAT32:
                    case GGUF_TYPE_BOOL:
                    case GGUF_TYPE_UINT64:
                    case GGUF_TYPE_INT64:
                    case GGUF_TYPE_FLOAT64: {
                        // Fill a word-granular buffer with random bits and hand
                        // it to gguf as an array of `type_arr`.
                        const size_t nbytes = ne*gguf_type_size(type_arr);
                        std::vector<uint32_t> random_data((nbytes + sizeof(uint32_t) - 1) / sizeof(uint32_t));
                        for (size_t j = 0; j < random_data.size(); ++j) {
                            random_data[j] = rng();
                            if (type_arr == GGUF_TYPE_BOOL) {
                                random_data[j] &= 0x01010101; // the sanitizer complains if booleans are not 0 or 1
                            }
                        }
                        gguf_set_arr_data(gguf_ctx, key.c_str(), type_arr, random_data.data(), ne);
                    } break;
                    case GGUF_TYPE_STRING: {
                        // data_cpp owns the strings; data_c is the char* view
                        // the C API expects.
                        std::vector<std::string>  data_cpp(ne);
                        std::vector<const char *> data_c(ne);
                        for (size_t j = 0; j < data_cpp.size(); ++j) {
                            data_cpp[j] = std::to_string(rng());
                            data_c[j]   = data_cpp[j].c_str();
                        }
                        gguf_set_arr_str(gguf_ctx, key.c_str(), data_c.data(), ne);
                    } break;
                    case GGUF_TYPE_ARRAY: {
                        break; // not supported
                    }
                    case GGUF_TYPE_COUNT:
                    default: {
                        GGML_ABORT("fatal error");
                    }
                }
            } break;
            case GGUF_TYPE_COUNT:
            default: {
                GGML_ABORT("fatal error");
            }
        }
    }

    // no_alloc: descriptors only here; data is allocated on the backend below.
    struct ggml_init_params ggml_params = {
        /*.mem_size   =*/ 256*ggml_tensor_overhead(),
        /*.mem_buffer =*/ nullptr,
        /*.no_alloc   =*/ true,
    };
    struct ggml_context * ctx = ggml_init(ggml_params);

    for (int i = 0; i < 256; ++i) {
        const std::string name = "my_tensor_" + std::to_string(i);
        const enum ggml_type type = ggml_type(rng() % GGML_TYPE_COUNT);
        const size_t type_size = ggml_type_size(type);

        if (type_size == 0) {
            continue; // gap in the ggml_type enum, no such type
        }

        const int n_dims = 1 + rng() % GGML_MAX_DIMS;
        int64_t ne[GGML_MAX_DIMS];
        // First dim must be a multiple of the type's block size.
        ne[0] = (1 + rng() % 10) * ggml_blck_size(type);
        for (int j = 1; j < n_dims; ++j) {
            ne[j] = 1 + rng() % 10;
        }

        struct ggml_tensor * tensor = ggml_new_tensor(ctx, type, n_dims, ne);
        ggml_set_name(tensor, name.c_str());
    }

    // Allocate all tensors on the backend, fill them with random bytes, and
    // register each with the gguf context.
    ggml_backend_buffer_t buf = ggml_backend_alloc_ctx_tensors(ctx, backend);
    for (struct ggml_tensor * t = ggml_get_first_tensor(ctx); t != nullptr; t = ggml_get_next_tensor(ctx, t)) {
        const size_t nbytes = ggml_nbytes(t);
        std::vector<uint32_t> random_data((nbytes + sizeof(uint32_t) - 1) / sizeof(uint32_t));
        for (size_t j = 0; j < random_data.size(); ++j) {
            random_data[j] = rng();
        }
        ggml_backend_tensor_set(t, random_data.data(), 0, nbytes);

        gguf_add_tensor(gguf_ctx, t);
    }

    return {gguf_ctx, ctx, buf};
}
|
|
918
|
-
|
|
919
|
-
// Check that every KV pair of `ctx` exists in `other` with the same type and
// value (one-directional subset check; callers invoke it both ways for
// equality). Returns false if any entry is missing or differs.
static bool all_kv_in_other(const gguf_context * ctx, const gguf_context * other) {
    bool ok = true;

    const int n_kv = gguf_get_n_kv(ctx);
    for (int id = 0; id < n_kv; ++id) {
        const char * name = gguf_get_key(ctx, id);

        // Key may sit at a different index in `other`; look it up by name.
        const int idx_other = gguf_find_key(other, name);
        if (idx_other < 0) {
            ok = false;
            continue;
        }

        const gguf_type type = gguf_get_kv_type(ctx, id);
        if (type != gguf_get_kv_type(other, idx_other)) {
            ok = false;
            continue;
        }

        if (type == GGUF_TYPE_ARRAY) {
            const size_t arr_n = gguf_get_arr_n(ctx, id);
            if (arr_n != gguf_get_arr_n(other, idx_other)) {
                ok = false;
                continue;
            }

            const gguf_type type_arr = gguf_get_arr_type(ctx, id);
            if (type_arr != gguf_get_arr_type(other, idx_other)) {
                ok = false;
                continue;
            }

            // Booleans compared by truthiness rather than raw bytes.
            if (type_arr == GGUF_TYPE_BOOL) {
                const int8_t * data = reinterpret_cast<const int8_t *>(gguf_get_arr_data(ctx, id));
                const int8_t * data_other = reinterpret_cast<const int8_t *>(gguf_get_arr_data(other, idx_other));
                for (size_t arr_i = 0; arr_i < arr_n; ++arr_i) {
                    if (bool(data[arr_i]) != bool(data_other[arr_i])) {
                        ok = false;
                    }
                }
                continue;
            }

            // String arrays compared element-wise.
            if (type_arr == GGUF_TYPE_STRING) {
                for (size_t arr_i = 0; arr_i < arr_n; ++arr_i) {
                    const std::string str = gguf_get_arr_str(ctx, id, arr_i);
                    const std::string str_other = gguf_get_arr_str(other, idx_other, arr_i);
                    if (str != str_other) {
                        ok = false;
                    }
                }
                continue;
            }

            // Any other element type: raw byte comparison of the whole array.
            const int8_t * data = reinterpret_cast<const int8_t *>(gguf_get_arr_data(ctx, id));
            const int8_t * data_other = reinterpret_cast<const int8_t *>(gguf_get_arr_data(other, idx_other));
            if (!std::equal(data, data + arr_n*gguf_type_size(type_arr), data_other)) {
                ok = false;
            }
            continue;
        }

        if (type == GGUF_TYPE_STRING) {
            const std::string str = gguf_get_val_str(ctx, id);
            const std::string str_other = gguf_get_val_str(other, idx_other);
            if (str != str_other) {
                ok = false;
            }
            continue;
        }

        // Scalar: raw byte comparison of the value's storage.
        const char * data = reinterpret_cast<const char *>(gguf_get_val_data(ctx, id));
        const char * data_other = reinterpret_cast<const char *>(gguf_get_val_data(other, idx_other));
        if (!std::equal(data, data + gguf_type_size(type), data_other)) {
            ok = false;
        }
    }

    return ok;
}
|
|
999
|
-
|
|
1000
|
-
static bool all_tensors_in_other(const gguf_context * ctx, const gguf_context * other) {
|
|
1001
|
-
bool ok = true;
|
|
1002
|
-
|
|
1003
|
-
const int n_tensors = gguf_get_n_tensors(ctx);
|
|
1004
|
-
for (int id = 0; id < n_tensors; ++id) {
|
|
1005
|
-
const std::string name = gguf_get_tensor_name(ctx, id);
|
|
1006
|
-
|
|
1007
|
-
const int idx_other = gguf_find_tensor(other, name.c_str());
|
|
1008
|
-
if (id != idx_other) {
|
|
1009
|
-
ok = false;
|
|
1010
|
-
if (idx_other < 0) {
|
|
1011
|
-
continue;
|
|
1012
|
-
}
|
|
1013
|
-
}
|
|
1014
|
-
|
|
1015
|
-
const ggml_type type = gguf_get_tensor_type(ctx, id);
|
|
1016
|
-
if (type != gguf_get_tensor_type(other, id)) {
|
|
1017
|
-
ok = false;
|
|
1018
|
-
}
|
|
1019
|
-
|
|
1020
|
-
const size_t offset = gguf_get_tensor_offset(ctx, id);
|
|
1021
|
-
if (offset != gguf_get_tensor_offset(other, id)) {
|
|
1022
|
-
ok = false;
|
|
1023
|
-
}
|
|
1024
|
-
}
|
|
1025
|
-
|
|
1026
|
-
return ok;
|
|
1027
|
-
}
|
|
1028
|
-
|
|
1029
|
-
static bool same_tensor_data(const struct ggml_context * orig, const struct ggml_context * read) {
|
|
1030
|
-
bool ok = true;
|
|
1031
|
-
|
|
1032
|
-
struct ggml_tensor * t_orig = ggml_get_first_tensor(orig);
|
|
1033
|
-
struct ggml_tensor * t_read = ggml_get_first_tensor(read);
|
|
1034
|
-
|
|
1035
|
-
if (std::string(t_read->name) != "GGUF tensor data binary blob") {
|
|
1036
|
-
return false;
|
|
1037
|
-
}
|
|
1038
|
-
t_read = ggml_get_next_tensor(read, t_read);
|
|
1039
|
-
|
|
1040
|
-
while (t_orig) {
|
|
1041
|
-
if (!t_read) {
|
|
1042
|
-
ok = false;
|
|
1043
|
-
break;
|
|
1044
|
-
}
|
|
1045
|
-
|
|
1046
|
-
const size_t nbytes = ggml_nbytes(t_orig);
|
|
1047
|
-
if (ggml_nbytes(t_read) != nbytes) {
|
|
1048
|
-
ok = false;
|
|
1049
|
-
break;
|
|
1050
|
-
}
|
|
1051
|
-
std::vector<char> data_orig(nbytes);
|
|
1052
|
-
ggml_backend_tensor_get(t_orig, data_orig.data(), 0, nbytes);
|
|
1053
|
-
if (!std::equal(data_orig.data(), data_orig.data() + nbytes, reinterpret_cast<const char *>(t_read->data))) {
|
|
1054
|
-
ok = false;
|
|
1055
|
-
}
|
|
1056
|
-
|
|
1057
|
-
t_orig = ggml_get_next_tensor(orig, t_orig);
|
|
1058
|
-
t_read = ggml_get_next_tensor(read, t_read);
|
|
1059
|
-
}
|
|
1060
|
-
if (t_read) {
|
|
1061
|
-
ok = false;
|
|
1062
|
-
}
|
|
1063
|
-
|
|
1064
|
-
return ok;
|
|
1065
|
-
}
|
|
1066
|
-
|
|
1067
|
-
// Write a random GGUF context to a temporary file, read it back, and verify
// that the two contexts agree (version, KV pairs, tensor metadata, and —
// unless only_meta — the tensor data itself).
// Returns {number of passed checks, number of checks run}.
static std::pair<int, int> test_roundtrip(ggml_backend_dev_t dev, const unsigned int seed, const bool only_meta) {
    ggml_backend_t backend = ggml_backend_dev_init(dev, nullptr);
    printf("%s: device=%s, backend=%s, only_meta=%s\n",
        __func__, ggml_backend_dev_description(dev), ggml_backend_name(backend), only_meta ? "yes" : "no");

    int npass = 0;
    int ntest = 0;

    // __func__ inside a lambda would be "operator()", so capture the name here
    // to keep the printed labels identical to the original per-check printfs.
    const char * fname = __func__;
    auto check = [&](const char * label, const bool pass) {
        printf("%s: %s: ", fname, label);
        if (pass) {
            printf("\033[1;32mOK\033[0m\n");
            npass++;
        } else {
            printf("\033[1;31mFAIL\033[0m\n");
        }
        ntest++;
    };

    struct gguf_context * gguf_ctx_0;
    struct ggml_context * ctx_0;
    ggml_backend_buffer_t bbuf;
    {
        struct random_gguf_context_result result = get_random_gguf_context(backend, seed);
        gguf_ctx_0 = result.gguf_ctx;
        ctx_0      = result.ctx;
        bbuf       = result.buffer;
    }

    FILE * file = tmpfile();

#ifdef _WIN32
    if (!file) {
        // NOTE(review): this early return skips the frees below, leaking the
        // contexts/backend created above — presumably acceptable for a skipped test run.
        printf("failed to create tmpfile(), needs elevated privileges on Windows");
        printf("skipping tests");
        return std::make_pair(0, 0);
    }
#else
    GGML_ASSERT(file);
#endif // _WIN32

    // serialize the context to the temp file, then rewind so it can be read back
    {
        std::vector<int8_t> buf;
        gguf_write_to_buf(gguf_ctx_0, buf, only_meta);
        GGML_ASSERT(fwrite(buf.data(), 1, buf.size(), file) == buf.size());
        rewind(file);
    }

    struct ggml_context * ctx_1 = nullptr;
    struct gguf_init_params gguf_params = {
        /*no_alloc =*/ false,
        /*ctx      =*/ only_meta ? nullptr : &ctx_1, // only allocate tensor data when it was written
    };
    struct gguf_context * gguf_ctx_1 = gguf_init_from_file_impl(file, gguf_params);

    check("same_version",             gguf_get_version(gguf_ctx_0)   == gguf_get_version(gguf_ctx_1));
    check("same_n_kv",                gguf_get_n_kv(gguf_ctx_0)      == gguf_get_n_kv(gguf_ctx_1));
    check("same_n_tensors",           gguf_get_n_tensors(gguf_ctx_0) == gguf_get_n_tensors(gguf_ctx_1));
    check("all_orig_kv_in_read",      all_kv_in_other(gguf_ctx_0, gguf_ctx_1));
    check("all_read_kv_in_orig",      all_kv_in_other(gguf_ctx_1, gguf_ctx_0));
    check("all_orig_tensors_in_read", all_tensors_in_other(gguf_ctx_0, gguf_ctx_1));
    check("all_read_tensors_in_orig", all_tensors_in_other(gguf_ctx_1, gguf_ctx_0));

    if (!only_meta) {
        check("same_tensor_data", same_tensor_data(ctx_0, ctx_1));
    }

    ggml_backend_buffer_free(bbuf);
    ggml_free(ctx_0);
    ggml_free(ctx_1);
    gguf_free(gguf_ctx_0);
    gguf_free(gguf_ctx_1);
    ggml_backend_free(backend);
    fclose(file);

    printf("\n");
    return std::make_pair(npass, ntest);
}
|
|
1196
|
-
|
|
1197
|
-
// Exercise gguf_set_kv: copy the KV pairs of one random context into another
// random context and into an empty one, then verify the copies (including a
// second copy in the reverse direction).
// Returns {number of passed checks, number of checks run}.
static std::pair<int, int> test_gguf_set_kv(ggml_backend_dev_t dev, const unsigned int seed) {
    ggml_backend_t backend = ggml_backend_dev_init(dev, nullptr);
    printf("%s: device=%s, backend=%s\n", __func__, ggml_backend_dev_description(dev), ggml_backend_name(backend));

    int npass = 0;
    int ntest = 0;

    // __func__ inside a lambda would be "operator()", so capture the name here
    // to keep the printed labels identical to the original per-check printfs.
    const char * fname = __func__;
    auto check = [&](const char * label, const bool pass) {
        printf("%s: %s: ", fname, label);
        if (pass) {
            printf("\033[1;32mOK\033[0m\n");
            npass++;
        } else {
            printf("\033[1;31mFAIL\033[0m\n");
        }
        ntest++;
    };

    struct gguf_context * gguf_ctx_0;
    struct ggml_context * ctx_0;
    ggml_backend_buffer_t bbuf_0;
    {
        struct random_gguf_context_result result = get_random_gguf_context(backend, seed);
        gguf_ctx_0 = result.gguf_ctx;
        ctx_0      = result.ctx;
        bbuf_0     = result.buffer;
    }

    // second random context with a different seed so its KV set differs
    struct gguf_context * gguf_ctx_1;
    struct ggml_context * ctx_1;
    ggml_backend_buffer_t bbuf_1;
    {
        struct random_gguf_context_result result = get_random_gguf_context(backend, seed + 1);
        gguf_ctx_1 = result.gguf_ctx;
        ctx_1      = result.ctx;
        bbuf_1     = result.buffer;
    }

    struct gguf_context * gguf_ctx_2 = gguf_init_empty();

    gguf_set_kv(gguf_ctx_1, gguf_ctx_0); // merge into a populated context
    gguf_set_kv(gguf_ctx_2, gguf_ctx_0); // copy into an empty context

    check("same_n_kv",     gguf_get_n_kv(gguf_ctx_0) == gguf_get_n_kv(gguf_ctx_2));
    check("all_kv_0_in_1", all_kv_in_other(gguf_ctx_0, gguf_ctx_1));
    check("all_kv_0_in_2", all_kv_in_other(gguf_ctx_0, gguf_ctx_2));

    // copy back in the other direction; the two contexts should now be equal
    gguf_set_kv(gguf_ctx_0, gguf_ctx_1);

    check("same_n_kv_after_double_copy",       gguf_get_n_kv(gguf_ctx_0) == gguf_get_n_kv(gguf_ctx_1));
    check("all_kv_1_in_0_after_double_copy",   all_kv_in_other(gguf_ctx_1, gguf_ctx_0));

    ggml_backend_buffer_free(bbuf_0);
    ggml_backend_buffer_free(bbuf_1);
    ggml_free(ctx_0);
    ggml_free(ctx_1);
    gguf_free(gguf_ctx_0);
    gguf_free(gguf_ctx_1);
    gguf_free(gguf_ctx_2);
    ggml_backend_free(backend);

    printf("\n");
    return std::make_pair(npass, ntest);
}
|
|
1288
|
-
|
|
1289
|
-
// Print command-line usage for this test binary.
static void print_usage() {
    printf("usage: test-gguf [seed]\n");
    // fixed double negative: was "if no seed is unspecified ..."
    printf("       if no seed is specified then a random seed is used\n");
}
|
|
1293
|
-
|
|
1294
|
-
int main(int argc, char ** argv) {
|
|
1295
|
-
if (argc > 2) {
|
|
1296
|
-
print_usage();
|
|
1297
|
-
return 1;
|
|
1298
|
-
}
|
|
1299
|
-
|
|
1300
|
-
std::random_device rd;
|
|
1301
|
-
const unsigned int seed = argc < 2 ? rd() : std::stoi(argv[1]);
|
|
1302
|
-
|
|
1303
|
-
// Initialize ggml backends early so the prints aren't interleaved with the test results:
|
|
1304
|
-
ggml_backend_dev_count();
|
|
1305
|
-
fprintf(stderr, "\n");
|
|
1306
|
-
|
|
1307
|
-
int npass = 0;
|
|
1308
|
-
int ntest = 0;
|
|
1309
|
-
{
|
|
1310
|
-
std::pair<int, int> result = test_handcrafted_file(seed);
|
|
1311
|
-
npass += result.first;
|
|
1312
|
-
ntest += result.second;
|
|
1313
|
-
}
|
|
1314
|
-
|
|
1315
|
-
for (size_t i = 0; i < ggml_backend_dev_count(); ++i) {
|
|
1316
|
-
ggml_backend_dev_t dev = ggml_backend_dev_get(i);
|
|
1317
|
-
|
|
1318
|
-
for (bool only_meta : {true, false}) {
|
|
1319
|
-
std::pair<int, int> result = test_roundtrip(dev, seed, only_meta);
|
|
1320
|
-
npass += result.first;
|
|
1321
|
-
ntest += result.second;
|
|
1322
|
-
}
|
|
1323
|
-
|
|
1324
|
-
{
|
|
1325
|
-
std::pair<int, int> result = test_gguf_set_kv(dev, seed);
|
|
1326
|
-
npass += result.first;
|
|
1327
|
-
ntest += result.second;
|
|
1328
|
-
}
|
|
1329
|
-
}
|
|
1330
|
-
|
|
1331
|
-
printf("%d/%d tests passed\n", npass, ntest);
|
|
1332
|
-
if (npass != ntest) {
|
|
1333
|
-
printf("\033[1;31mFAIL\033[0m\n");
|
|
1334
|
-
return 1;
|
|
1335
|
-
}
|
|
1336
|
-
printf("\033[1;32mOK\033[0m\n");
|
|
1337
|
-
return 0;
|
|
1338
|
-
}
|