@fugood/llama.node 0.6.3 → 1.0.0-beta.2
This diff shows the content of publicly available package versions as they appear in their respective public registries, and is provided for informational purposes only.
- package/CMakeLists.txt +40 -30
- package/README.md +4 -1
- package/lib/binding.js +41 -29
- package/lib/binding.ts +26 -25
- package/package.json +40 -7
- package/scripts/build.js +47 -0
- package/scripts/llama.cpp.patch +109 -0
- package/src/anyascii.c +22223 -0
- package/src/anyascii.h +42 -0
- package/src/tts_utils.cpp +20 -7
- package/src/tts_utils.h +2 -0
- package/bin/darwin/arm64/llama-node.node +0 -0
- package/bin/darwin/x64/llama-node.node +0 -0
- package/bin/linux/arm64/llama-node.node +0 -0
- package/bin/linux/x64/llama-node.node +0 -0
- package/bin/linux-cuda/arm64/llama-node.node +0 -0
- package/bin/linux-cuda/x64/llama-node.node +0 -0
- package/bin/linux-vulkan/arm64/llama-node.node +0 -0
- package/bin/linux-vulkan/x64/llama-node.node +0 -0
- package/bin/win32/x64/llama-node.node +0 -0
- package/bin/win32/x64/node.lib +0 -0
- package/bin/win32-vulkan/arm64/llama-node.node +0 -0
- package/bin/win32-vulkan/arm64/node.lib +0 -0
- package/bin/win32-vulkan/x64/llama-node.node +0 -0
- package/bin/win32-vulkan/x64/node.lib +0 -0
- package/src/llama.cpp/.github/workflows/build-linux-cross.yml +0 -233
- package/src/llama.cpp/.github/workflows/build.yml +0 -1078
- package/src/llama.cpp/.github/workflows/close-issue.yml +0 -28
- package/src/llama.cpp/.github/workflows/docker.yml +0 -178
- package/src/llama.cpp/.github/workflows/editorconfig.yml +0 -29
- package/src/llama.cpp/.github/workflows/gguf-publish.yml +0 -44
- package/src/llama.cpp/.github/workflows/labeler.yml +0 -17
- package/src/llama.cpp/.github/workflows/python-check-requirements.yml +0 -33
- package/src/llama.cpp/.github/workflows/python-lint.yml +0 -30
- package/src/llama.cpp/.github/workflows/python-type-check.yml +0 -40
- package/src/llama.cpp/.github/workflows/release.yml +0 -739
- package/src/llama.cpp/.github/workflows/server.yml +0 -237
- package/src/llama.cpp/.github/workflows/winget.yml +0 -42
- package/src/llama.cpp/cmake/arm64-apple-clang.cmake +0 -16
- package/src/llama.cpp/cmake/arm64-windows-llvm.cmake +0 -16
- package/src/llama.cpp/cmake/build-info.cmake +0 -64
- package/src/llama.cpp/cmake/common.cmake +0 -35
- package/src/llama.cpp/cmake/git-vars.cmake +0 -22
- package/src/llama.cpp/cmake/x64-windows-llvm.cmake +0 -5
- package/src/llama.cpp/common/build-info.cpp.in +0 -4
- package/src/llama.cpp/docs/build.md +0 -561
- package/src/llama.cpp/examples/CMakeLists.txt +0 -43
- package/src/llama.cpp/examples/batched/CMakeLists.txt +0 -5
- package/src/llama.cpp/examples/batched/batched.cpp +0 -246
- package/src/llama.cpp/examples/chat-13B.bat +0 -57
- package/src/llama.cpp/examples/convert-llama2c-to-ggml/CMakeLists.txt +0 -5
- package/src/llama.cpp/examples/convert-llama2c-to-ggml/convert-llama2c-to-ggml.cpp +0 -941
- package/src/llama.cpp/examples/deprecation-warning/deprecation-warning.cpp +0 -35
- package/src/llama.cpp/examples/embedding/CMakeLists.txt +0 -5
- package/src/llama.cpp/examples/embedding/embedding.cpp +0 -323
- package/src/llama.cpp/examples/eval-callback/CMakeLists.txt +0 -10
- package/src/llama.cpp/examples/eval-callback/eval-callback.cpp +0 -194
- package/src/llama.cpp/examples/gen-docs/CMakeLists.txt +0 -5
- package/src/llama.cpp/examples/gen-docs/gen-docs.cpp +0 -83
- package/src/llama.cpp/examples/gguf/CMakeLists.txt +0 -5
- package/src/llama.cpp/examples/gguf/gguf.cpp +0 -265
- package/src/llama.cpp/examples/gguf-hash/CMakeLists.txt +0 -22
- package/src/llama.cpp/examples/gguf-hash/deps/rotate-bits/rotate-bits.h +0 -46
- package/src/llama.cpp/examples/gguf-hash/deps/sha1/sha1.c +0 -295
- package/src/llama.cpp/examples/gguf-hash/deps/sha1/sha1.h +0 -52
- package/src/llama.cpp/examples/gguf-hash/deps/sha256/sha256.c +0 -221
- package/src/llama.cpp/examples/gguf-hash/deps/sha256/sha256.h +0 -24
- package/src/llama.cpp/examples/gguf-hash/deps/xxhash/xxhash.c +0 -42
- package/src/llama.cpp/examples/gguf-hash/deps/xxhash/xxhash.h +0 -7093
- package/src/llama.cpp/examples/gguf-hash/gguf-hash.cpp +0 -694
- package/src/llama.cpp/examples/gritlm/CMakeLists.txt +0 -5
- package/src/llama.cpp/examples/gritlm/gritlm.cpp +0 -229
- package/src/llama.cpp/examples/jeopardy/questions.txt +0 -100
- package/src/llama.cpp/examples/llama.android/app/build.gradle.kts +0 -65
- package/src/llama.cpp/examples/llama.android/build.gradle.kts +0 -6
- package/src/llama.cpp/examples/llama.android/llama/build.gradle.kts +0 -71
- package/src/llama.cpp/examples/llama.android/llama/src/main/cpp/CMakeLists.txt +0 -53
- package/src/llama.cpp/examples/llama.android/llama/src/main/cpp/llama-android.cpp +0 -452
- package/src/llama.cpp/examples/llama.android/settings.gradle.kts +0 -18
- package/src/llama.cpp/examples/lookahead/CMakeLists.txt +0 -5
- package/src/llama.cpp/examples/lookahead/lookahead.cpp +0 -472
- package/src/llama.cpp/examples/lookup/CMakeLists.txt +0 -23
- package/src/llama.cpp/examples/lookup/lookup-create.cpp +0 -40
- package/src/llama.cpp/examples/lookup/lookup-merge.cpp +0 -47
- package/src/llama.cpp/examples/lookup/lookup-stats.cpp +0 -157
- package/src/llama.cpp/examples/lookup/lookup.cpp +0 -242
- package/src/llama.cpp/examples/parallel/CMakeLists.txt +0 -5
- package/src/llama.cpp/examples/parallel/parallel.cpp +0 -492
- package/src/llama.cpp/examples/passkey/CMakeLists.txt +0 -5
- package/src/llama.cpp/examples/passkey/passkey.cpp +0 -277
- package/src/llama.cpp/examples/retrieval/CMakeLists.txt +0 -5
- package/src/llama.cpp/examples/retrieval/retrieval.cpp +0 -304
- package/src/llama.cpp/examples/save-load-state/CMakeLists.txt +0 -5
- package/src/llama.cpp/examples/save-load-state/save-load-state.cpp +0 -246
- package/src/llama.cpp/examples/simple/CMakeLists.txt +0 -5
- package/src/llama.cpp/examples/simple/simple.cpp +0 -206
- package/src/llama.cpp/examples/simple-chat/CMakeLists.txt +0 -5
- package/src/llama.cpp/examples/simple-chat/simple-chat.cpp +0 -206
- package/src/llama.cpp/examples/simple-cmake-pkg/CMakeLists.txt +0 -11
- package/src/llama.cpp/examples/speculative/CMakeLists.txt +0 -5
- package/src/llama.cpp/examples/speculative/speculative.cpp +0 -644
- package/src/llama.cpp/examples/speculative-simple/CMakeLists.txt +0 -5
- package/src/llama.cpp/examples/speculative-simple/speculative-simple.cpp +0 -261
- package/src/llama.cpp/examples/sycl/CMakeLists.txt +0 -9
- package/src/llama.cpp/examples/sycl/build.sh +0 -23
- package/src/llama.cpp/examples/sycl/ls-sycl-device.cpp +0 -13
- package/src/llama.cpp/examples/sycl/run-llama2.sh +0 -27
- package/src/llama.cpp/examples/sycl/run-llama3.sh +0 -28
- package/src/llama.cpp/examples/sycl/win-build-sycl.bat +0 -33
- package/src/llama.cpp/examples/sycl/win-run-llama2.bat +0 -9
- package/src/llama.cpp/examples/sycl/win-run-llama3.bat +0 -9
- package/src/llama.cpp/examples/training/CMakeLists.txt +0 -5
- package/src/llama.cpp/examples/training/finetune.cpp +0 -96
- package/src/llama.cpp/ggml/cmake/GitVars.cmake +0 -22
- package/src/llama.cpp/ggml/cmake/common.cmake +0 -26
- package/src/llama.cpp/ggml/src/ggml-alloc.c +0 -1042
- package/src/llama.cpp/ggml/src/ggml-backend-impl.h +0 -255
- package/src/llama.cpp/ggml/src/ggml-backend-reg.cpp +0 -586
- package/src/llama.cpp/ggml/src/ggml-backend.cpp +0 -2008
- package/src/llama.cpp/ggml/src/ggml-blas/CMakeLists.txt +0 -87
- package/src/llama.cpp/ggml/src/ggml-blas/ggml-blas.cpp +0 -517
- package/src/llama.cpp/ggml/src/ggml-cann/CMakeLists.txt +0 -74
- package/src/llama.cpp/ggml/src/ggml-cann/acl_tensor.cpp +0 -179
- package/src/llama.cpp/ggml/src/ggml-cann/acl_tensor.h +0 -258
- package/src/llama.cpp/ggml/src/ggml-cann/aclnn_ops.cpp +0 -2863
- package/src/llama.cpp/ggml/src/ggml-cann/aclnn_ops.h +0 -1110
- package/src/llama.cpp/ggml/src/ggml-cann/common.h +0 -420
- package/src/llama.cpp/ggml/src/ggml-cann/ggml-cann.cpp +0 -2570
- package/src/llama.cpp/ggml/src/ggml-common.h +0 -1857
- package/src/llama.cpp/ggml/src/ggml-cpu/cmake/FindSIMD.cmake +0 -100
- package/src/llama.cpp/ggml/src/ggml-cuda/CMakeLists.txt +0 -184
- package/src/llama.cpp/ggml/src/ggml-cuda/vendors/cuda.h +0 -15
- package/src/llama.cpp/ggml/src/ggml-cuda/vendors/hip.h +0 -243
- package/src/llama.cpp/ggml/src/ggml-cuda/vendors/musa.h +0 -140
- package/src/llama.cpp/ggml/src/ggml-hip/CMakeLists.txt +0 -131
- package/src/llama.cpp/ggml/src/ggml-impl.h +0 -601
- package/src/llama.cpp/ggml/src/ggml-kompute/CMakeLists.txt +0 -166
- package/src/llama.cpp/ggml/src/ggml-kompute/ggml-kompute.cpp +0 -2251
- package/src/llama.cpp/ggml/src/ggml-metal/CMakeLists.txt +0 -120
- package/src/llama.cpp/ggml/src/ggml-metal/ggml-metal-impl.h +0 -622
- package/src/llama.cpp/ggml/src/ggml-musa/CMakeLists.txt +0 -113
- package/src/llama.cpp/ggml/src/ggml-opencl/CMakeLists.txt +0 -96
- package/src/llama.cpp/ggml/src/ggml-opencl/ggml-opencl.cpp +0 -5124
- package/src/llama.cpp/ggml/src/ggml-opt.cpp +0 -1037
- package/src/llama.cpp/ggml/src/ggml-quants.c +0 -5232
- package/src/llama.cpp/ggml/src/ggml-quants.h +0 -100
- package/src/llama.cpp/ggml/src/ggml-rpc/CMakeLists.txt +0 -9
- package/src/llama.cpp/ggml/src/ggml-rpc/ggml-rpc.cpp +0 -1813
- package/src/llama.cpp/ggml/src/ggml-sycl/CMakeLists.txt +0 -189
- package/src/llama.cpp/ggml/src/ggml-sycl/backend.hpp +0 -37
- package/src/llama.cpp/ggml/src/ggml-sycl/binbcast.cpp +0 -239
- package/src/llama.cpp/ggml/src/ggml-sycl/binbcast.hpp +0 -39
- package/src/llama.cpp/ggml/src/ggml-sycl/common.cpp +0 -83
- package/src/llama.cpp/ggml/src/ggml-sycl/common.hpp +0 -493
- package/src/llama.cpp/ggml/src/ggml-sycl/concat.cpp +0 -197
- package/src/llama.cpp/ggml/src/ggml-sycl/concat.hpp +0 -20
- package/src/llama.cpp/ggml/src/ggml-sycl/conv.cpp +0 -100
- package/src/llama.cpp/ggml/src/ggml-sycl/conv.hpp +0 -20
- package/src/llama.cpp/ggml/src/ggml-sycl/convert.cpp +0 -623
- package/src/llama.cpp/ggml/src/ggml-sycl/convert.hpp +0 -34
- package/src/llama.cpp/ggml/src/ggml-sycl/cpy.cpp +0 -701
- package/src/llama.cpp/ggml/src/ggml-sycl/cpy.hpp +0 -11
- package/src/llama.cpp/ggml/src/ggml-sycl/dequantize.hpp +0 -791
- package/src/llama.cpp/ggml/src/ggml-sycl/dmmv.cpp +0 -1160
- package/src/llama.cpp/ggml/src/ggml-sycl/dmmv.hpp +0 -27
- package/src/llama.cpp/ggml/src/ggml-sycl/dpct/helper.hpp +0 -2957
- package/src/llama.cpp/ggml/src/ggml-sycl/element_wise.cpp +0 -1536
- package/src/llama.cpp/ggml/src/ggml-sycl/element_wise.hpp +0 -75
- package/src/llama.cpp/ggml/src/ggml-sycl/gemm.hpp +0 -99
- package/src/llama.cpp/ggml/src/ggml-sycl/getrows.cpp +0 -311
- package/src/llama.cpp/ggml/src/ggml-sycl/getrows.hpp +0 -20
- package/src/llama.cpp/ggml/src/ggml-sycl/ggml-sycl.cpp +0 -4443
- package/src/llama.cpp/ggml/src/ggml-sycl/gla.cpp +0 -105
- package/src/llama.cpp/ggml/src/ggml-sycl/gla.hpp +0 -8
- package/src/llama.cpp/ggml/src/ggml-sycl/im2col.cpp +0 -136
- package/src/llama.cpp/ggml/src/ggml-sycl/im2col.hpp +0 -21
- package/src/llama.cpp/ggml/src/ggml-sycl/mmq.cpp +0 -3030
- package/src/llama.cpp/ggml/src/ggml-sycl/mmq.hpp +0 -33
- package/src/llama.cpp/ggml/src/ggml-sycl/mmvq.cpp +0 -1108
- package/src/llama.cpp/ggml/src/ggml-sycl/mmvq.hpp +0 -27
- package/src/llama.cpp/ggml/src/ggml-sycl/norm.cpp +0 -474
- package/src/llama.cpp/ggml/src/ggml-sycl/norm.hpp +0 -26
- package/src/llama.cpp/ggml/src/ggml-sycl/outprod.cpp +0 -46
- package/src/llama.cpp/ggml/src/ggml-sycl/outprod.hpp +0 -10
- package/src/llama.cpp/ggml/src/ggml-sycl/presets.hpp +0 -74
- package/src/llama.cpp/ggml/src/ggml-sycl/quants.hpp +0 -83
- package/src/llama.cpp/ggml/src/ggml-sycl/rope.cpp +0 -362
- package/src/llama.cpp/ggml/src/ggml-sycl/rope.hpp +0 -20
- package/src/llama.cpp/ggml/src/ggml-sycl/softmax.cpp +0 -264
- package/src/llama.cpp/ggml/src/ggml-sycl/softmax.hpp +0 -20
- package/src/llama.cpp/ggml/src/ggml-sycl/sycl_hw.cpp +0 -13
- package/src/llama.cpp/ggml/src/ggml-sycl/sycl_hw.hpp +0 -23
- package/src/llama.cpp/ggml/src/ggml-sycl/tsembd.cpp +0 -73
- package/src/llama.cpp/ggml/src/ggml-sycl/tsembd.hpp +0 -20
- package/src/llama.cpp/ggml/src/ggml-sycl/vecdotq.hpp +0 -1215
- package/src/llama.cpp/ggml/src/ggml-sycl/wkv.cpp +0 -305
- package/src/llama.cpp/ggml/src/ggml-sycl/wkv.hpp +0 -10
- package/src/llama.cpp/ggml/src/ggml-threading.cpp +0 -12
- package/src/llama.cpp/ggml/src/ggml-threading.h +0 -14
- package/src/llama.cpp/ggml/src/ggml-vulkan/CMakeLists.txt +0 -196
- package/src/llama.cpp/ggml/src/ggml-vulkan/ggml-vulkan.cpp +0 -10699
- package/src/llama.cpp/ggml/src/ggml-vulkan/vulkan-shaders/CMakeLists.txt +0 -39
- package/src/llama.cpp/ggml/src/ggml-vulkan/vulkan-shaders/vulkan-shaders-gen.cpp +0 -751
- package/src/llama.cpp/ggml/src/ggml.c +0 -6550
- package/src/llama.cpp/ggml/src/gguf.cpp +0 -1330
- package/src/llama.cpp/models/.editorconfig +0 -1
- package/src/llama.cpp/models/ggml-vocab-aquila.gguf +0 -0
- package/src/llama.cpp/models/ggml-vocab-baichuan.gguf +0 -0
- package/src/llama.cpp/models/ggml-vocab-bert-bge.gguf +0 -0
- package/src/llama.cpp/models/ggml-vocab-bert-bge.gguf.inp +0 -112
- package/src/llama.cpp/models/ggml-vocab-bert-bge.gguf.out +0 -46
- package/src/llama.cpp/models/ggml-vocab-chameleon.gguf.inp +0 -112
- package/src/llama.cpp/models/ggml-vocab-chameleon.gguf.out +0 -46
- package/src/llama.cpp/models/ggml-vocab-command-r.gguf +0 -0
- package/src/llama.cpp/models/ggml-vocab-command-r.gguf.inp +0 -112
- package/src/llama.cpp/models/ggml-vocab-command-r.gguf.out +0 -46
- package/src/llama.cpp/models/ggml-vocab-deepseek-coder.gguf +0 -0
- package/src/llama.cpp/models/ggml-vocab-deepseek-coder.gguf.inp +0 -112
- package/src/llama.cpp/models/ggml-vocab-deepseek-coder.gguf.out +0 -46
- package/src/llama.cpp/models/ggml-vocab-deepseek-llm.gguf +0 -0
- package/src/llama.cpp/models/ggml-vocab-deepseek-llm.gguf.inp +0 -112
- package/src/llama.cpp/models/ggml-vocab-deepseek-llm.gguf.out +0 -46
- package/src/llama.cpp/models/ggml-vocab-deepseek-r1-qwen.gguf.inp +0 -112
- package/src/llama.cpp/models/ggml-vocab-deepseek-r1-qwen.gguf.out +0 -46
- package/src/llama.cpp/models/ggml-vocab-falcon.gguf +0 -0
- package/src/llama.cpp/models/ggml-vocab-falcon.gguf.inp +0 -112
- package/src/llama.cpp/models/ggml-vocab-falcon.gguf.out +0 -46
- package/src/llama.cpp/models/ggml-vocab-gpt-2.gguf +0 -0
- package/src/llama.cpp/models/ggml-vocab-gpt-2.gguf.inp +0 -112
- package/src/llama.cpp/models/ggml-vocab-gpt-2.gguf.out +0 -46
- package/src/llama.cpp/models/ggml-vocab-gpt-4o.gguf.inp +0 -112
- package/src/llama.cpp/models/ggml-vocab-gpt-4o.gguf.out +0 -46
- package/src/llama.cpp/models/ggml-vocab-gpt-neox.gguf +0 -0
- package/src/llama.cpp/models/ggml-vocab-llama-bpe.gguf +0 -0
- package/src/llama.cpp/models/ggml-vocab-llama-bpe.gguf.inp +0 -112
- package/src/llama.cpp/models/ggml-vocab-llama-bpe.gguf.out +0 -46
- package/src/llama.cpp/models/ggml-vocab-llama-spm.gguf +0 -0
- package/src/llama.cpp/models/ggml-vocab-llama-spm.gguf.inp +0 -112
- package/src/llama.cpp/models/ggml-vocab-llama-spm.gguf.out +0 -46
- package/src/llama.cpp/models/ggml-vocab-llama4.gguf.inp +0 -112
- package/src/llama.cpp/models/ggml-vocab-llama4.gguf.out +0 -46
- package/src/llama.cpp/models/ggml-vocab-mpt.gguf +0 -0
- package/src/llama.cpp/models/ggml-vocab-mpt.gguf.inp +0 -112
- package/src/llama.cpp/models/ggml-vocab-mpt.gguf.out +0 -46
- package/src/llama.cpp/models/ggml-vocab-phi-3.gguf +0 -0
- package/src/llama.cpp/models/ggml-vocab-phi-3.gguf.inp +0 -112
- package/src/llama.cpp/models/ggml-vocab-phi-3.gguf.out +0 -46
- package/src/llama.cpp/models/ggml-vocab-pixtral.gguf.inp +0 -112
- package/src/llama.cpp/models/ggml-vocab-pixtral.gguf.out +0 -46
- package/src/llama.cpp/models/ggml-vocab-qwen2.gguf +0 -0
- package/src/llama.cpp/models/ggml-vocab-qwen2.gguf.inp +0 -112
- package/src/llama.cpp/models/ggml-vocab-qwen2.gguf.out +0 -46
- package/src/llama.cpp/models/ggml-vocab-refact.gguf +0 -0
- package/src/llama.cpp/models/ggml-vocab-refact.gguf.inp +0 -112
- package/src/llama.cpp/models/ggml-vocab-refact.gguf.out +0 -46
- package/src/llama.cpp/models/ggml-vocab-roberta-bpe.gguf.inp +0 -112
- package/src/llama.cpp/models/ggml-vocab-roberta-bpe.gguf.out +0 -46
- package/src/llama.cpp/models/ggml-vocab-starcoder.gguf +0 -0
- package/src/llama.cpp/models/ggml-vocab-starcoder.gguf.inp +0 -112
- package/src/llama.cpp/models/ggml-vocab-starcoder.gguf.out +0 -46
- package/src/llama.cpp/pocs/CMakeLists.txt +0 -14
- package/src/llama.cpp/pocs/vdot/CMakeLists.txt +0 -9
- package/src/llama.cpp/pocs/vdot/q8dot.cpp +0 -173
- package/src/llama.cpp/pocs/vdot/vdot.cpp +0 -311
- package/src/llama.cpp/prompts/LLM-questions.txt +0 -49
- package/src/llama.cpp/prompts/alpaca.txt +0 -1
- package/src/llama.cpp/prompts/assistant.txt +0 -31
- package/src/llama.cpp/prompts/chat-with-baichuan.txt +0 -4
- package/src/llama.cpp/prompts/chat-with-bob.txt +0 -7
- package/src/llama.cpp/prompts/chat-with-qwen.txt +0 -1
- package/src/llama.cpp/prompts/chat-with-vicuna-v0.txt +0 -7
- package/src/llama.cpp/prompts/chat-with-vicuna-v1.txt +0 -7
- package/src/llama.cpp/prompts/chat.txt +0 -28
- package/src/llama.cpp/prompts/dan-modified.txt +0 -1
- package/src/llama.cpp/prompts/dan.txt +0 -1
- package/src/llama.cpp/prompts/mnemonics.txt +0 -93
- package/src/llama.cpp/prompts/parallel-questions.txt +0 -43
- package/src/llama.cpp/prompts/reason-act.txt +0 -18
- package/src/llama.cpp/requirements/requirements-all.txt +0 -15
- package/src/llama.cpp/requirements/requirements-compare-llama-bench.txt +0 -2
- package/src/llama.cpp/requirements/requirements-convert_hf_to_gguf.txt +0 -7
- package/src/llama.cpp/requirements/requirements-convert_hf_to_gguf_update.txt +0 -7
- package/src/llama.cpp/requirements/requirements-convert_legacy_llama.txt +0 -5
- package/src/llama.cpp/requirements/requirements-convert_llama_ggml_to_gguf.txt +0 -1
- package/src/llama.cpp/requirements/requirements-convert_lora_to_gguf.txt +0 -4
- package/src/llama.cpp/requirements/requirements-gguf_editor_gui.txt +0 -3
- package/src/llama.cpp/requirements/requirements-pydantic.txt +0 -3
- package/src/llama.cpp/requirements/requirements-test-tokenizer-random.txt +0 -1
- package/src/llama.cpp/requirements/requirements-tool_bench.txt +0 -12
- package/src/llama.cpp/requirements.txt +0 -13
- package/src/llama.cpp/scripts/build-info.sh +0 -30
- package/src/llama.cpp/scripts/install-oneapi.bat +0 -19
- package/src/llama.cpp/scripts/xxd.cmake +0 -16
- package/src/llama.cpp/tests/CMakeLists.txt +0 -177
- package/src/llama.cpp/tests/get-model.cpp +0 -21
- package/src/llama.cpp/tests/get-model.h +0 -2
- package/src/llama.cpp/tests/test-arg-parser.cpp +0 -178
- package/src/llama.cpp/tests/test-autorelease.cpp +0 -24
- package/src/llama.cpp/tests/test-backend-ops.cpp +0 -4793
- package/src/llama.cpp/tests/test-barrier.cpp +0 -94
- package/src/llama.cpp/tests/test-c.c +0 -7
- package/src/llama.cpp/tests/test-chat-template.cpp +0 -417
- package/src/llama.cpp/tests/test-chat.cpp +0 -985
- package/src/llama.cpp/tests/test-double-float.cpp +0 -57
- package/src/llama.cpp/tests/test-gbnf-validator.cpp +0 -109
- package/src/llama.cpp/tests/test-gguf.cpp +0 -1338
- package/src/llama.cpp/tests/test-grammar-integration.cpp +0 -1308
- package/src/llama.cpp/tests/test-grammar-llguidance.cpp +0 -1201
- package/src/llama.cpp/tests/test-grammar-parser.cpp +0 -519
- package/src/llama.cpp/tests/test-json-schema-to-grammar.cpp +0 -1304
- package/src/llama.cpp/tests/test-llama-grammar.cpp +0 -408
- package/src/llama.cpp/tests/test-log.cpp +0 -39
- package/src/llama.cpp/tests/test-model-load-cancel.cpp +0 -27
- package/src/llama.cpp/tests/test-mtmd-c-api.c +0 -63
- package/src/llama.cpp/tests/test-opt.cpp +0 -904
- package/src/llama.cpp/tests/test-quantize-fns.cpp +0 -186
- package/src/llama.cpp/tests/test-quantize-perf.cpp +0 -365
- package/src/llama.cpp/tests/test-quantize-stats.cpp +0 -424
- package/src/llama.cpp/tests/test-regex-partial.cpp +0 -288
- package/src/llama.cpp/tests/test-rope.cpp +0 -262
- package/src/llama.cpp/tests/test-sampling.cpp +0 -399
- package/src/llama.cpp/tests/test-tokenizer-0.cpp +0 -312
- package/src/llama.cpp/tests/test-tokenizer-1-bpe.cpp +0 -155
- package/src/llama.cpp/tests/test-tokenizer-1-spm.cpp +0 -125
- package/src/llama.cpp/tools/CMakeLists.txt +0 -39
- package/src/llama.cpp/tools/batched-bench/CMakeLists.txt +0 -5
- package/src/llama.cpp/tools/batched-bench/batched-bench.cpp +0 -204
- package/src/llama.cpp/tools/cvector-generator/CMakeLists.txt +0 -5
- package/src/llama.cpp/tools/cvector-generator/completions.txt +0 -582
- package/src/llama.cpp/tools/cvector-generator/cvector-generator.cpp +0 -508
- package/src/llama.cpp/tools/cvector-generator/mean.hpp +0 -48
- package/src/llama.cpp/tools/cvector-generator/negative.txt +0 -4
- package/src/llama.cpp/tools/cvector-generator/pca.hpp +0 -315
- package/src/llama.cpp/tools/cvector-generator/positive.txt +0 -4
- package/src/llama.cpp/tools/export-lora/CMakeLists.txt +0 -5
- package/src/llama.cpp/tools/export-lora/export-lora.cpp +0 -434
- package/src/llama.cpp/tools/gguf-split/CMakeLists.txt +0 -5
- package/src/llama.cpp/tools/gguf-split/gguf-split.cpp +0 -583
- package/src/llama.cpp/tools/imatrix/CMakeLists.txt +0 -5
- package/src/llama.cpp/tools/imatrix/imatrix.cpp +0 -667
- package/src/llama.cpp/tools/llama-bench/CMakeLists.txt +0 -5
- package/src/llama.cpp/tools/llama-bench/llama-bench.cpp +0 -2024
- package/src/llama.cpp/tools/main/CMakeLists.txt +0 -5
- package/src/llama.cpp/tools/main/main.cpp +0 -977
- package/src/llama.cpp/tools/mtmd/CMakeLists.txt +0 -58
- package/src/llama.cpp/tools/mtmd/clip-impl.h +0 -462
- package/src/llama.cpp/tools/mtmd/clip.cpp +0 -4024
- package/src/llama.cpp/tools/mtmd/clip.h +0 -101
- package/src/llama.cpp/tools/mtmd/deprecation-warning.cpp +0 -22
- package/src/llama.cpp/tools/mtmd/miniaudio.h +0 -93468
- package/src/llama.cpp/tools/mtmd/mtmd-audio.cpp +0 -855
- package/src/llama.cpp/tools/mtmd/mtmd-audio.h +0 -62
- package/src/llama.cpp/tools/mtmd/mtmd-cli.cpp +0 -377
- package/src/llama.cpp/tools/mtmd/mtmd-helper.cpp +0 -297
- package/src/llama.cpp/tools/mtmd/mtmd.cpp +0 -942
- package/src/llama.cpp/tools/mtmd/mtmd.h +0 -362
- package/src/llama.cpp/tools/mtmd/requirements.txt +0 -5
- package/src/llama.cpp/tools/perplexity/CMakeLists.txt +0 -5
- package/src/llama.cpp/tools/perplexity/perplexity.cpp +0 -2063
- package/src/llama.cpp/tools/quantize/CMakeLists.txt +0 -6
- package/src/llama.cpp/tools/quantize/quantize.cpp +0 -519
- package/src/llama.cpp/tools/rpc/CMakeLists.txt +0 -4
- package/src/llama.cpp/tools/rpc/rpc-server.cpp +0 -322
- package/src/llama.cpp/tools/run/CMakeLists.txt +0 -16
- package/src/llama.cpp/tools/run/linenoise.cpp/linenoise.cpp +0 -1995
- package/src/llama.cpp/tools/run/linenoise.cpp/linenoise.h +0 -137
- package/src/llama.cpp/tools/run/run.cpp +0 -1261
- package/src/llama.cpp/tools/server/CMakeLists.txt +0 -51
- package/src/llama.cpp/tools/server/bench/requirements.txt +0 -2
- package/src/llama.cpp/tools/server/httplib.h +0 -10506
- package/src/llama.cpp/tools/server/server.cpp +0 -4966
- package/src/llama.cpp/tools/server/tests/requirements.txt +0 -8
- package/src/llama.cpp/tools/server/utils.hpp +0 -1337
- package/src/llama.cpp/tools/tokenize/CMakeLists.txt +0 -5
- package/src/llama.cpp/tools/tokenize/tokenize.cpp +0 -416
- package/src/llama.cpp/tools/tts/CMakeLists.txt +0 -5
- package/src/llama.cpp/tools/tts/tts.cpp +0 -1092

package/src/llama.cpp/ggml/src/ggml-opt.cpp
@@ -1,1037 +0,0 @@
1
|
-
#include "ggml-opt.h"
|
|
2
|
-
|
|
3
|
-
#include "ggml.h"
|
|
4
|
-
#include "ggml-alloc.h"
|
|
5
|
-
#include "ggml-backend.h"
|
|
6
|
-
#include "ggml-impl.h"
|
|
7
|
-
|
|
8
|
-
#include <algorithm>
|
|
9
|
-
#include <cmath>
|
|
10
|
-
#include <cstdint>
|
|
11
|
-
#include <cinttypes>
|
|
12
|
-
#include <map>
|
|
13
|
-
#include <random>
|
|
14
|
-
#include <vector>
|
|
15
|
-
|
|
16
|
-
struct ggml_opt_dataset {
|
|
17
|
-
struct ggml_context * ctx = nullptr;
|
|
18
|
-
ggml_backend_buffer_t buf = nullptr;
|
|
19
|
-
struct ggml_tensor * data = nullptr;
|
|
20
|
-
struct ggml_tensor * labels = nullptr;
|
|
21
|
-
|
|
22
|
-
int64_t ndata = -1;
|
|
23
|
-
int64_t ndata_shard = -1;
|
|
24
|
-
size_t nbs_data = -1;
|
|
25
|
-
size_t nbs_labels = -1;
|
|
26
|
-
|
|
27
|
-
std::vector<int64_t> permutation;
|
|
28
|
-
};
|
|
29
|
-
|
|
30
|
-
struct ggml_opt_context {
|
|
31
|
-
ggml_backend_sched_t backend_sched = nullptr;
|
|
32
|
-
ggml_cgraph * allocated_graph = nullptr;
|
|
33
|
-
ggml_cgraph * allocated_graph_copy = nullptr;
|
|
34
|
-
struct ggml_context * ctx_static = nullptr;
|
|
35
|
-
struct ggml_context * ctx_cpu = nullptr;
|
|
36
|
-
struct ggml_context * ctx_compute = nullptr;
|
|
37
|
-
struct ggml_context * ctx_copy = nullptr;
|
|
38
|
-
ggml_backend_buffer_t buf_static = nullptr;
|
|
39
|
-
ggml_backend_buffer_t buf_cpu = nullptr;
|
|
40
|
-
std::mt19937 rng;
|
|
41
|
-
enum ggml_opt_loss_type loss_type;
|
|
42
|
-
enum ggml_opt_build_type build_type;
|
|
43
|
-
enum ggml_opt_build_type build_type_alloc;
|
|
44
|
-
|
|
45
|
-
struct ggml_tensor * inputs = nullptr;
|
|
46
|
-
struct ggml_tensor * outputs = nullptr;
|
|
47
|
-
struct ggml_tensor * labels = nullptr;
|
|
48
|
-
|
|
49
|
-
struct ggml_tensor * loss = nullptr;
|
|
50
|
-
struct ggml_tensor * pred = nullptr;
|
|
51
|
-
struct ggml_tensor * ncorrect = nullptr;
|
|
52
|
-
|
|
53
|
-
struct ggml_cgraph * gf = nullptr;
|
|
54
|
-
struct ggml_cgraph * gb_grad = nullptr;
|
|
55
|
-
struct ggml_cgraph * gb_opt = nullptr;
|
|
56
|
-
bool static_graphs = false;
|
|
57
|
-
bool eval_ready = false;
|
|
58
|
-
std::vector<struct ggml_tensor *> grad_accs;
|
|
59
|
-
std::vector<struct ggml_tensor *> grad_m;
|
|
60
|
-
std::vector<struct ggml_tensor *> grad_v;
|
|
61
|
-
|
|
62
|
-
int64_t iter = 1;
|
|
63
|
-
int32_t opt_period = 1;
|
|
64
|
-
int32_t opt_i = 0;
|
|
65
|
-
bool loss_per_datapoint = false;
|
|
66
|
-
|
|
67
|
-
ggml_opt_get_optimizer_params get_opt_pars = nullptr;
|
|
68
|
-
void * get_opt_pars_ud = nullptr;
|
|
69
|
-
struct ggml_tensor * adamw_params = nullptr;
|
|
70
|
-
};
|
|
71
|
-
|
|
72
|
-
struct ggml_opt_result {
|
|
73
|
-
int64_t ndata = 0;
|
|
74
|
-
std::vector<float> loss;
|
|
75
|
-
std::vector<int32_t> pred;
|
|
76
|
-
int64_t ncorrect = 0;
|
|
77
|
-
|
|
78
|
-
int64_t opt_period = -1;
|
|
79
|
-
bool loss_per_datapoint = false;
|
|
80
|
-
};
|
|
81
|
-
|
|
82
|
-
// ====== Dataset ======
|
|
83
|
-
|
|
84
|
-
ggml_opt_dataset_t ggml_opt_dataset_init(
|
|
85
|
-
enum ggml_type type_data,
|
|
86
|
-
enum ggml_type type_label,
|
|
87
|
-
int64_t ne_datapoint,
|
|
88
|
-
int64_t ne_label,
|
|
89
|
-
int64_t ndata,
|
|
90
|
-
int64_t ndata_shard) {
|
|
91
|
-
GGML_ASSERT(ne_datapoint > 0);
|
|
92
|
-
GGML_ASSERT(ne_label >= 0);
|
|
93
|
-
GGML_ASSERT(ndata > 0);
|
|
94
|
-
GGML_ASSERT(ndata_shard > 0);
|
|
95
|
-
|
|
96
|
-
ggml_opt_dataset_t result = new ggml_opt_dataset;
|
|
97
|
-
result->ndata = ndata;
|
|
98
|
-
result->ndata_shard = ndata_shard;
|
|
99
|
-
|
|
100
|
-
{
|
|
101
|
-
struct ggml_init_params params = {
|
|
102
|
-
/*.mem_size =*/ 2*ggml_tensor_overhead(),
|
|
103
|
-
/*.mem_buffer =*/ nullptr,
|
|
104
|
-
/*.no_alloc =*/ true,
|
|
105
|
-
};
|
|
106
|
-
result->ctx = ggml_init(params);
|
|
107
|
-
}
|
|
108
|
-
|
|
109
|
-
result->data = ggml_new_tensor_2d(result->ctx, type_data, ne_datapoint, ndata);
|
|
110
|
-
result->nbs_data = ggml_nbytes(result->data) * ndata_shard/ndata;
|
|
111
|
-
|
|
112
|
-
if (ne_label > 0) {
|
|
113
|
-
result->labels = ggml_new_tensor_2d(result->ctx, type_label, ne_label, ndata);
|
|
114
|
-
result->nbs_labels = ggml_nbytes(result->labels) * ndata_shard/ndata;
|
|
115
|
-
} else {
|
|
116
|
-
result->labels = nullptr;
|
|
117
|
-
result->nbs_labels = 0;
|
|
118
|
-
}
|
|
119
|
-
|
|
120
|
-
result->buf = ggml_backend_alloc_ctx_tensors_from_buft(result->ctx, ggml_backend_cpu_buffer_type());
|
|
121
|
-
|
|
122
|
-
const int64_t nshards = ndata/ndata_shard;
|
|
123
|
-
result->permutation.resize(nshards);
|
|
124
|
-
for (int64_t i = 0; i < nshards; ++i) {
|
|
125
|
-
result->permutation[i] = i;
|
|
126
|
-
}
|
|
127
|
-
return result;
|
|
128
|
-
}
|
|
129
|
-
|
|
130
|
-
void ggml_opt_dataset_free(ggml_opt_dataset_t dataset) {
|
|
131
|
-
ggml_backend_buffer_free(dataset->buf);
|
|
132
|
-
ggml_free(dataset->ctx);
|
|
133
|
-
delete dataset;
|
|
134
|
-
}
|
|
135
|
-
|
|
136
|
-
int64_t ggml_opt_dataset_ndata(ggml_opt_dataset_t dataset) {
|
|
137
|
-
return dataset->ndata;
|
|
138
|
-
}
|
|
139
|
-
|
|
140
|
-
struct ggml_tensor * ggml_opt_dataset_data(ggml_opt_dataset_t dataset) {
|
|
141
|
-
return dataset->data;
|
|
142
|
-
}
|
|
143
|
-
|
|
144
|
-
struct ggml_tensor * ggml_opt_dataset_labels(ggml_opt_dataset_t dataset) {
|
|
145
|
-
return dataset->labels;
|
|
146
|
-
}
|
|
147
|
-
|
|
148
|
-
void ggml_opt_dataset_shuffle(ggml_opt_context_t opt_ctx, ggml_opt_dataset_t dataset, int64_t idata) {
|
|
149
|
-
GGML_ASSERT(idata <= dataset->ndata);
|
|
150
|
-
|
|
151
|
-
if (idata < 0) {
|
|
152
|
-
std::shuffle(dataset->permutation.begin(), dataset->permutation.end(), opt_ctx->rng);
|
|
153
|
-
return;
|
|
154
|
-
}
|
|
155
|
-
|
|
156
|
-
GGML_ASSERT(idata % dataset->ndata_shard == 0);
|
|
157
|
-
const int64_t ishard_max = idata / dataset->ndata_shard;
|
|
158
|
-
std::shuffle(dataset->permutation.begin(), dataset->permutation.begin() + ishard_max, opt_ctx->rng);
|
|
159
|
-
}
|
|
160
|
-
|
|
161
|
-
void ggml_opt_dataset_get_batch(ggml_opt_dataset_t dataset, struct ggml_tensor * data_batch, struct ggml_tensor * labels_batch, int64_t ibatch) {
|
|
162
|
-
GGML_ASSERT( data_batch && ggml_is_contiguous(data_batch));
|
|
163
|
-
GGML_ASSERT(!labels_batch || ggml_is_contiguous(labels_batch));
|
|
164
|
-
GGML_ASSERT((labels_batch == nullptr) == (dataset->labels == nullptr));
|
|
165
|
-
GGML_ASSERT( data_batch->type == dataset->data->type);
|
|
166
|
-
GGML_ASSERT(!labels_batch || labels_batch->type == dataset->labels->type);
|
|
167
|
-
|
|
168
|
-
const size_t nb_data_batch = ggml_nbytes(data_batch);
|
|
169
|
-
GGML_ASSERT(nb_data_batch % dataset->nbs_data == 0);
|
|
170
|
-
const int64_t shards_per_batch = nb_data_batch / dataset->nbs_data;
|
|
171
|
-
|
|
172
|
-
if (labels_batch) {
|
|
173
|
-
const size_t nb_labels_batch = ggml_nbytes(labels_batch);
|
|
174
|
-
GGML_ASSERT(nb_labels_batch == shards_per_batch*dataset->nbs_labels);
|
|
175
|
-
}
|
|
176
|
-
|
|
177
|
-
GGML_ASSERT((ibatch + 1)*shards_per_batch <= int64_t(dataset->permutation.size()));
|
|
178
|
-
|
|
179
|
-
for (int64_t ishard_batch = 0; ishard_batch < shards_per_batch; ++ishard_batch) {
|
|
180
|
-
const int64_t ishard = dataset->permutation[ibatch*shards_per_batch + ishard_batch];
|
|
181
|
-
|
|
182
|
-
const char * ptr_data = (const char *) dataset->data->data + ishard*dataset->nbs_data;
|
|
183
|
-
ggml_backend_tensor_set(data_batch, ptr_data, ishard_batch*dataset->nbs_data, dataset->nbs_data);
|
|
184
|
-
|
|
185
|
-
if (!labels_batch) {
|
|
186
|
-
continue;
|
|
187
|
-
}
|
|
188
|
-
|
|
189
|
-
const char * ptr_labels = (const char *) dataset->labels->data + ishard*dataset->nbs_labels;
|
|
190
|
-
ggml_backend_tensor_set(labels_batch, ptr_labels, ishard_batch*dataset->nbs_labels, dataset->nbs_labels);
|
|
191
|
-
}
|
|
192
|
-
}
|
|
193
|
-
|
|
194
|
-
void ggml_opt_dataset_get_batch_host(ggml_opt_dataset_t dataset, void * data_batch, size_t nb_data_batch, void * labels_batch, int64_t ibatch) {
|
|
195
|
-
GGML_ASSERT((labels_batch == nullptr) == (dataset->labels == nullptr));
|
|
196
|
-
GGML_ASSERT(nb_data_batch % dataset->nbs_data == 0);
|
|
197
|
-
|
|
198
|
-
const int64_t shards_per_batch = nb_data_batch / dataset->nbs_data;
|
|
199
|
-
|
|
200
|
-
GGML_ASSERT((ibatch + 1)*shards_per_batch <= int64_t(dataset->permutation.size()));
|
|
201
|
-
|
|
202
|
-
for (int64_t ishard_batch = 0; ishard_batch < shards_per_batch; ++ishard_batch) {
|
|
203
|
-
const int64_t ishard = dataset->permutation[ibatch*shards_per_batch + ishard_batch];
|
|
204
|
-
|
|
205
|
-
const char * ptr_data = (const char *) dataset->data->data + ishard *dataset->nbs_data;
|
|
206
|
-
char * ptr_data_batch = (char *) data_batch + ishard_batch*dataset->nbs_data;
|
|
207
|
-
memcpy(ptr_data_batch, ptr_data, dataset->nbs_data);
|
|
208
|
-
|
|
209
|
-
if (!labels_batch) {
|
|
210
|
-
continue;
|
|
211
|
-
}
|
|
212
|
-
|
|
213
|
-
const char * ptr_labels = (const char *) dataset->labels->data + ishard *dataset->nbs_labels;
|
|
214
|
-
char * ptr_labels_batch = (char *) labels_batch + ishard_batch*dataset->nbs_labels;
|
|
215
|
-
memcpy(ptr_labels_batch, ptr_labels, dataset->nbs_labels);
|
|
216
|
-
}
|
|
217
|
-
}
|
|
218
|
-
|
|
219
|
-
// ====== Model / Context ======
|
|
220
|
-
|
|
221
|
-
struct ggml_opt_optimizer_params ggml_opt_get_default_optimizer_params(void * userdata) {
|
|
222
|
-
GGML_UNUSED(userdata);
|
|
223
|
-
|
|
224
|
-
ggml_opt_optimizer_params result;
|
|
225
|
-
|
|
226
|
-
result.adamw.alpha = 0.001f;
|
|
227
|
-
result.adamw.beta1 = 0.9f;
|
|
228
|
-
result.adamw.beta2 = 0.999f;
|
|
229
|
-
result.adamw.eps = 1e-8f;
|
|
230
|
-
result.adamw.wd = 0.0f;
|
|
231
|
-
|
|
232
|
-
return result;
|
|
233
|
-
}
|
|
234
|
-
|
|
235
|
-
struct ggml_opt_optimizer_params ggml_opt_get_constant_optimizer_params(void * userdata) {
|
|
236
|
-
return *((struct ggml_opt_optimizer_params *) userdata);
|
|
237
|
-
}
|
|
238
|
-
|
|
239
|
-
struct ggml_opt_params ggml_opt_default_params(
|
|
240
|
-
ggml_backend_sched_t backend_sched,
|
|
241
|
-
enum ggml_opt_loss_type loss_type) {
|
|
242
|
-
return {
|
|
243
|
-
/*backend_sched =*/ backend_sched,
|
|
244
|
-
/*ctx_compute =*/ nullptr,
|
|
245
|
-
/*inputs =*/ nullptr,
|
|
246
|
-
/*logits =*/ nullptr,
|
|
247
|
-
/*loss_type =*/ loss_type,
|
|
248
|
-
/*build_type =*/ GGML_OPT_BUILD_TYPE_OPT,
|
|
249
|
-
/*opt_period =*/ 1,
|
|
250
|
-
/*get_opt_pars =*/ ggml_opt_get_default_optimizer_params,
|
|
251
|
-
/*get_opt_pars_ud =*/ nullptr,
|
|
252
|
-
};
|
|
253
|
-
}
|
|
254
|
-
|
|
255
|
-
static ggml_tensor * map_tensor(std::map<ggml_tensor *, ggml_tensor *> & tensor_map, ggml_context * ctx, ggml_tensor * tensor) {
|
|
256
|
-
if (!tensor) {
|
|
257
|
-
return nullptr;
|
|
258
|
-
}
|
|
259
|
-
|
|
260
|
-
if (tensor_map.find(tensor) != tensor_map.end()) {
|
|
261
|
-
return tensor_map[tensor];
|
|
262
|
-
}
|
|
263
|
-
|
|
264
|
-
ggml_tensor * new_tensor = ggml_dup_tensor(ctx, tensor);
|
|
265
|
-
tensor_map[tensor] = new_tensor;
|
|
266
|
-
|
|
267
|
-
new_tensor->op = tensor->op;
|
|
268
|
-
for (int i = 0; i < GGML_MAX_DIMS; i++) {
|
|
269
|
-
new_tensor->nb[i] = tensor->nb[i];
|
|
270
|
-
}
|
|
271
|
-
new_tensor->flags = tensor->flags;
|
|
272
|
-
memcpy(new_tensor->op_params, tensor->op_params, sizeof(tensor->op_params));
|
|
273
|
-
strcpy(new_tensor->name, tensor->name);
|
|
274
|
-
new_tensor->data = tensor->data;
|
|
275
|
-
new_tensor->buffer = tensor->buffer;
|
|
276
|
-
new_tensor->extra = tensor->extra;
|
|
277
|
-
new_tensor->view_offs = tensor->view_offs;
|
|
278
|
-
new_tensor->view_src = map_tensor(tensor_map, ctx, tensor->view_src);
|
|
279
|
-
for (int i = 0; i < GGML_MAX_SRC; i++) {
|
|
280
|
-
new_tensor->src[i] = map_tensor(tensor_map, ctx, tensor->src[i]);
|
|
281
|
-
}
|
|
282
|
-
|
|
283
|
-
return new_tensor;
|
|
284
|
-
}
|
|
285
|
-
|
|
286
|
-
static ggml_cgraph * dup_graph(ggml_context * ctx, ggml_cgraph * src) {
|
|
287
|
-
std::map<ggml_tensor *, ggml_tensor *> tensor_map;
|
|
288
|
-
|
|
289
|
-
ggml_cgraph * dst = ggml_new_graph_custom(ctx, src->size, /*grads =*/ true);
|
|
290
|
-
|
|
291
|
-
for (int i = 0; i < src->n_leafs; i++) {
|
|
292
|
-
ggml_build_forward_expand(dst, map_tensor(tensor_map, ctx, src->leafs[i]));
|
|
293
|
-
}
|
|
294
|
-
GGML_ASSERT(dst->n_leafs == src->n_leafs);
|
|
295
|
-
for (int i = 0; i < src->n_nodes; i++) {
|
|
296
|
-
ggml_build_forward_expand(dst, map_tensor(tensor_map, ctx, src->nodes[i]));
|
|
297
|
-
}
|
|
298
|
-
GGML_ASSERT(dst->n_nodes == src->n_nodes);
|
|
299
|
-
for (int i = 0; i < src->n_nodes; ++i) {
|
|
300
|
-
const size_t igrad_src = ggml_hash_find(&src->visited_hash_set, src->nodes[i]);
|
|
301
|
-
const size_t igrad_dst = ggml_hash_find(&dst->visited_hash_set, dst->nodes[i]);
|
|
302
|
-
|
|
303
|
-
GGML_ASSERT(igrad_src != GGML_HASHSET_FULL);
|
|
304
|
-
GGML_ASSERT(ggml_bitset_get(src->visited_hash_set.used, igrad_src));
|
|
305
|
-
GGML_ASSERT(igrad_dst != GGML_HASHSET_FULL);
|
|
306
|
-
GGML_ASSERT(ggml_bitset_get(dst->visited_hash_set.used, igrad_dst));
|
|
307
|
-
|
|
308
|
-
dst->grads[igrad_dst] = src->grads[igrad_src];
|
|
309
|
-
dst->grad_accs[igrad_dst] = src->grad_accs[igrad_src];
|
|
310
|
-
}
|
|
311
|
-
|
|
312
|
-
return dst;
|
|
313
|
-
}
|
|
314
|
-
|
|
315
|
-
static void ggml_opt_build(ggml_opt_context_t opt_ctx) {
|
|
316
|
-
GGML_ASSERT(opt_ctx->ctx_compute && "no compute context set, either use static graphs or set one with ggml_opt_prepare_alloc");
|
|
317
|
-
GGML_ASSERT((!opt_ctx->static_graphs || opt_ctx->inputs->data) && "when using static graphs the inputs must be allocated statically");
|
|
318
|
-
|
|
319
|
-
const bool accumulate = opt_ctx->build_type_alloc >= GGML_OPT_BUILD_TYPE_GRAD &&
|
|
320
|
-
!(opt_ctx->static_graphs && opt_ctx->build_type_alloc == GGML_OPT_BUILD_TYPE_OPT && opt_ctx->opt_period == 1);
|
|
321
|
-
|
|
322
|
-
ggml_set_input(opt_ctx->inputs);
|
|
323
|
-
ggml_set_output(opt_ctx->outputs);
|
|
324
|
-
|
|
325
|
-
int n_param = 0;
|
|
326
|
-
for (int i = 0; i < opt_ctx->gf->n_nodes; ++i) {
|
|
327
|
-
const struct ggml_tensor * node = opt_ctx->gf->nodes[i];
|
|
328
|
-
if (node->flags & GGML_TENSOR_FLAG_PARAM) {
|
|
329
|
-
n_param++;
|
|
330
|
-
}
|
|
331
|
-
GGML_ASSERT(!(node->flags & GGML_TENSOR_FLAG_LOSS) && "support for extra loss terms not implemented");
|
|
332
|
-
}
|
|
333
|
-
|
|
334
|
-
if (!opt_ctx->ctx_static) {
|
|
335
|
-
// The static context is used for:
|
|
336
|
-
// - gradients (1 per loss, 1 tensor per param if using gradient accumulation)
|
|
337
|
-
// - optimizer momenta (2 tensors per param)
|
|
338
|
-
// - labels (if using static graphs)
|
|
339
|
-
// - loss (if using static graphs, up to 5 tensors)
|
|
340
|
-
// - pred (if using static graphs)
|
|
341
|
-
// - ncorrect (if using static graphs, 2 tensors).
|
|
342
|
-
constexpr size_t n_loss = 1;
|
|
343
|
-
const size_t tensors_per_param = (accumulate ? 1 : 0) +
|
|
344
|
-
(opt_ctx->build_type_alloc == GGML_OPT_BUILD_TYPE_OPT ? 2 : 0);
|
|
345
|
-
const size_t tensors_const = opt_ctx->static_graphs ? 9 : 0;
|
|
346
|
-
const size_t size_meta = (n_loss + tensors_per_param*n_param + tensors_const) * ggml_tensor_overhead();
|
|
347
|
-
struct ggml_init_params params = {
|
|
348
|
-
/*.mem_size =*/ size_meta,
|
|
349
|
-
/*.mem_buffer =*/ nullptr,
|
|
350
|
-
/*.no_alloc =*/ true,
|
|
351
|
-
};
|
|
352
|
-
opt_ctx->ctx_static = ggml_init(params);
|
|
353
|
-
}
|
|
354
|
-
GGML_ASSERT(opt_ctx->build_type <= opt_ctx->build_type_alloc);
|
|
355
|
-
|
|
356
|
-
{
|
|
357
|
-
// The cpu context is allocated statically if using static graphs, dynamically otherwise.
|
|
358
|
-
// It is used for:
|
|
359
|
-
// - optimizer parameters (1 shared for all optimizer invocations)
|
|
360
|
-
const size_t size_meta = 1 * ggml_tensor_overhead();
|
|
361
|
-
struct ggml_init_params params = {
|
|
362
|
-
/*.mem_size =*/ size_meta,
|
|
363
|
-
/*.mem_buffer =*/ nullptr,
|
|
364
|
-
/*.no_alloc =*/ true,
|
|
365
|
-
};
|
|
366
|
-
ggml_free(opt_ctx->ctx_cpu);
|
|
367
|
-
opt_ctx->ctx_cpu = ggml_init(params);
|
|
368
|
-
|
|
369
|
-
ggml_backend_buffer_free(opt_ctx->buf_cpu);
|
|
370
|
-
opt_ctx->buf_cpu = nullptr;
|
|
371
|
-
}
|
|
372
|
-
|
|
373
|
-
struct ggml_context * ctx_results = opt_ctx->static_graphs ? opt_ctx->ctx_static : opt_ctx->ctx_compute;
|
|
374
|
-
|
|
375
|
-
switch (opt_ctx->loss_type) {
|
|
376
|
-
case GGML_OPT_LOSS_TYPE_MEAN: {
|
|
377
|
-
opt_ctx->loss = ggml_sum(ctx_results, opt_ctx->outputs);
|
|
378
|
-
ggml_set_name(opt_ctx->loss, "loss_sum");
|
|
379
|
-
const float scale = 1.0f / (opt_ctx->opt_period * ggml_nelements(opt_ctx->outputs));
|
|
380
|
-
opt_ctx->loss = ggml_scale(ctx_results, opt_ctx->loss, scale);
|
|
381
|
-
ggml_set_name(opt_ctx->loss, "loss_mean");
|
|
382
|
-
opt_ctx->loss_per_datapoint = true;
|
|
383
|
-
break;
|
|
384
|
-
}
|
|
385
|
-
case GGML_OPT_LOSS_TYPE_SUM: {
|
|
386
|
-
opt_ctx->loss = ggml_sum(ctx_results, opt_ctx->outputs);
|
|
387
|
-
ggml_set_name(opt_ctx->loss, "loss_sum");
|
|
388
|
-
opt_ctx->loss_per_datapoint = false;
|
|
389
|
-
break;
|
|
390
|
-
}
|
|
391
|
-
case GGML_OPT_LOSS_TYPE_CROSS_ENTROPY: {
|
|
392
|
-
opt_ctx->labels = ggml_dup_tensor(ctx_results, opt_ctx->outputs);
|
|
393
|
-
ggml_set_input(opt_ctx->labels);
|
|
394
|
-
ggml_set_name(opt_ctx->labels, "labels");
|
|
395
|
-
opt_ctx->loss = ggml_cross_entropy_loss(ctx_results, opt_ctx->outputs, opt_ctx->labels);
|
|
396
|
-
ggml_set_name(opt_ctx->loss, "loss_cross_entropy");
|
|
397
|
-
if (opt_ctx->opt_period > 1) {
|
|
398
|
-
opt_ctx->loss = ggml_scale(ctx_results, opt_ctx->loss, 1.0f / opt_ctx->opt_period);
|
|
399
|
-
ggml_set_name(opt_ctx->loss, "loss_cross_entropy_scaled");
|
|
400
|
-
}
|
|
401
|
-
opt_ctx->loss_per_datapoint = true;
|
|
402
|
-
break;
|
|
403
|
-
}
|
|
404
|
-
case GGML_OPT_LOSS_TYPE_MEAN_SQUARED_ERROR: {
|
|
405
|
-
opt_ctx->labels = ggml_dup_tensor(ctx_results, opt_ctx->outputs);
|
|
406
|
-
ggml_set_input(opt_ctx->labels);
|
|
407
|
-
ggml_set_name(opt_ctx->labels, "labels");
|
|
408
|
-
opt_ctx->loss = ggml_sub(ctx_results, opt_ctx->outputs, opt_ctx->labels);
|
|
409
|
-
ggml_set_name(opt_ctx->loss, "loss_error");
|
|
410
|
-
opt_ctx->loss = ggml_sqr(ctx_results, opt_ctx->loss);
|
|
411
|
-
ggml_set_name(opt_ctx->loss, "loss_squared_error");
|
|
412
|
-
opt_ctx->loss = ggml_sum(ctx_results, opt_ctx->loss);
|
|
413
|
-
ggml_set_name(opt_ctx->loss, "loss_sum_squared_error");
|
|
414
|
-
const float scale = 1.0f / (opt_ctx->opt_period * ggml_nelements(opt_ctx->outputs));
|
|
415
|
-
opt_ctx->loss = ggml_scale(ctx_results, opt_ctx->loss, scale);
|
|
416
|
-
ggml_set_name(opt_ctx->loss, "loss_mean_squared_error");
|
|
417
|
-
opt_ctx->loss_per_datapoint = true;
|
|
418
|
-
break;
|
|
419
|
-
}
|
|
420
|
-
}
|
|
421
|
-
ggml_set_output(opt_ctx->loss);
|
|
422
|
-
ggml_set_loss(opt_ctx->loss);
|
|
423
|
-
ggml_build_forward_expand(opt_ctx->gf, opt_ctx->loss);
|
|
424
|
-
|
|
425
|
-
if (opt_ctx->loss_type == GGML_OPT_LOSS_TYPE_CROSS_ENTROPY) {
|
|
426
|
-
opt_ctx->pred = ggml_argmax(ctx_results, opt_ctx->outputs);
|
|
427
|
-
ggml_set_name(opt_ctx->pred, "pred");
|
|
428
|
-
ggml_set_output(opt_ctx->pred);
|
|
429
|
-
ggml_build_forward_expand(opt_ctx->gf, opt_ctx->pred);
|
|
430
|
-
|
|
431
|
-
opt_ctx->ncorrect = ggml_count_equal(ctx_results, opt_ctx->pred, ggml_argmax(ctx_results, opt_ctx->labels));
|
|
432
|
-
ggml_set_name(opt_ctx->ncorrect, "ncorrect");
|
|
433
|
-
ggml_set_output(opt_ctx->ncorrect);
|
|
434
|
-
ggml_build_forward_expand(opt_ctx->gf, opt_ctx->ncorrect);
|
|
435
|
-
}
|
|
436
|
-
|
|
437
|
-
if (opt_ctx->buf_static) {
|
|
438
|
-
if (opt_ctx->build_type == GGML_OPT_BUILD_TYPE_FORWARD) {
|
|
439
|
-
return;
|
|
440
|
-
}
|
|
441
|
-
} else if (opt_ctx->build_type_alloc == GGML_OPT_BUILD_TYPE_FORWARD) {
|
|
442
|
-
opt_ctx->buf_static = ggml_backend_alloc_ctx_tensors(
|
|
443
|
-
opt_ctx->ctx_static, ggml_backend_sched_get_backend(opt_ctx->backend_sched, 0));
|
|
444
|
-
return;
|
|
445
|
-
}
|
|
446
|
-
|
|
447
|
-
if (opt_ctx->grad_accs.empty()) {
|
|
448
|
-
GGML_ASSERT(opt_ctx->build_type_alloc >= GGML_OPT_BUILD_TYPE_GRAD);
|
|
449
|
-
|
|
450
|
-
const int n_nodes = opt_ctx->gf->n_nodes;
|
|
451
|
-
opt_ctx->grad_accs.resize(n_nodes);
|
|
452
|
-
for (int i = 0; i < n_nodes; ++i) {
|
|
453
|
-
ggml_tensor * node = opt_ctx->gf->nodes[i];
|
|
454
|
-
if ((accumulate && (node->flags & GGML_TENSOR_FLAG_PARAM)) || (node->flags & GGML_TENSOR_FLAG_LOSS)) {
|
|
455
|
-
opt_ctx->grad_accs[i] = ggml_new_tensor(opt_ctx->ctx_static, GGML_TYPE_F32, GGML_MAX_DIMS, node->ne);
|
|
456
|
-
} else {
|
|
457
|
-
opt_ctx->grad_accs[i] = nullptr;
|
|
458
|
-
}
|
|
459
|
-
}
|
|
460
|
-
|
|
461
|
-
if (opt_ctx->build_type_alloc >= GGML_OPT_BUILD_TYPE_OPT) {
|
|
462
|
-
opt_ctx->grad_m.resize(n_nodes);
|
|
463
|
-
opt_ctx->grad_v.resize(n_nodes);
|
|
464
|
-
for (int i = 0; i < n_nodes; ++i) {
|
|
465
|
-
ggml_tensor * node = opt_ctx->gf->nodes[i];
|
|
466
|
-
if (node->flags & GGML_TENSOR_FLAG_PARAM) {
|
|
467
|
-
opt_ctx->grad_m[i] = ggml_new_tensor(opt_ctx->ctx_static, GGML_TYPE_F32, GGML_MAX_DIMS, node->ne);
|
|
468
|
-
opt_ctx->grad_v[i] = ggml_new_tensor(opt_ctx->ctx_static, GGML_TYPE_F32, GGML_MAX_DIMS, node->ne);
|
|
469
|
-
} else {
|
|
470
|
-
opt_ctx->grad_m[i] = nullptr;
|
|
471
|
-
opt_ctx->grad_v[i] = nullptr;
|
|
472
|
-
}
|
|
473
|
-
}
|
|
474
|
-
}
|
|
475
|
-
}
|
|
476
|
-
|
|
477
|
-
// gb_grad == graph backward gradients, forward pass, then backward pass to calculate gradients.
|
|
478
|
-
opt_ctx->gb_grad = ggml_graph_dup(opt_ctx->ctx_compute, opt_ctx->gf, /*force_grads =*/ true);
|
|
479
|
-
ggml_build_backward_expand(opt_ctx->ctx_compute, opt_ctx->gb_grad, opt_ctx->grad_accs.data());
|
|
480
|
-
|
|
481
|
-
if (opt_ctx->buf_static) {
|
|
482
|
-
if (opt_ctx->build_type == GGML_OPT_BUILD_TYPE_GRAD) {
|
|
483
|
-
return;
|
|
484
|
-
}
|
|
485
|
-
} else if (opt_ctx->build_type_alloc == GGML_OPT_BUILD_TYPE_GRAD) {
|
|
486
|
-
opt_ctx->buf_static = ggml_backend_alloc_ctx_tensors(opt_ctx->ctx_static, ggml_backend_sched_get_backend(opt_ctx->backend_sched, 0));
|
|
487
|
-
ggml_graph_reset(opt_ctx->gb_grad);
|
|
488
|
-
}
|
|
489
|
-
|
|
490
|
-
GGML_ASSERT(opt_ctx->build_type_alloc == GGML_OPT_BUILD_TYPE_OPT);
|
|
491
|
-
|
|
492
|
-
// gb_opt == graph backward optimize, forward pass, then backward pass to calculate gradients, then optimizer step.
|
|
493
|
-
opt_ctx->gb_opt = ggml_graph_dup(opt_ctx->ctx_compute, opt_ctx->gb_grad, /*force_grads =*/ true);
|
|
494
|
-
|
|
495
|
-
opt_ctx->adamw_params = ggml_new_tensor_1d(opt_ctx->ctx_cpu, GGML_TYPE_F32, 7);
|
|
496
|
-
ggml_set_input(opt_ctx->adamw_params);
|
|
497
|
-
ggml_set_name(opt_ctx->adamw_params, "adamw_params");
|
|
498
|
-
|
|
499
|
-
for (int i = opt_ctx->gf->n_nodes-1; i >= 0; --i) {
|
|
500
|
-
struct ggml_tensor * node = opt_ctx->gb_opt->nodes[i];
|
|
501
|
-
struct ggml_tensor * grad = ggml_graph_get_grad(opt_ctx->gb_opt, node);
|
|
502
|
-
|
|
503
|
-
if (grad && (node->flags & GGML_TENSOR_FLAG_PARAM)) {
|
|
504
|
-
struct ggml_tensor * m = opt_ctx->grad_m[i];
|
|
505
|
-
struct ggml_tensor * v = opt_ctx->grad_v[i];
|
|
506
|
-
struct ggml_tensor * opt_step = ggml_opt_step_adamw(opt_ctx->ctx_compute, node, grad, m, v, opt_ctx->adamw_params);
|
|
507
|
-
|
|
508
|
-
ggml_set_name(m, (std::string("AdamW m for ") + std::string(node->name)).c_str());
|
|
509
|
-
ggml_set_name(v, (std::string("AdamW v for ") + std::string(node->name)).c_str());
|
|
510
|
-
ggml_set_name(opt_step, (std::string("AdamW step for ") + std::string(node->name)).c_str());
|
|
511
|
-
|
|
512
|
-
ggml_build_forward_expand(opt_ctx->gb_opt, opt_step);
|
|
513
|
-
}
|
|
514
|
-
}
|
|
515
|
-
|
|
516
|
-
if (!opt_ctx->buf_static) {
|
|
517
|
-
opt_ctx->buf_static = ggml_backend_alloc_ctx_tensors(
|
|
518
|
-
opt_ctx->ctx_static, ggml_backend_sched_get_backend(opt_ctx->backend_sched, 0));
|
|
519
|
-
ggml_graph_reset(opt_ctx->gb_opt);
|
|
520
|
-
}
|
|
521
|
-
|
|
522
|
-
opt_ctx->buf_cpu = ggml_backend_alloc_ctx_tensors_from_buft(opt_ctx->ctx_cpu, ggml_backend_cpu_buffer_type());
|
|
523
|
-
}
|
|
524
|
-
|
|
525
|
-
ggml_opt_context_t ggml_opt_init(struct ggml_opt_params params) {
|
|
526
|
-
ggml_opt_context_t result = new struct ggml_opt_context;
|
|
527
|
-
result->backend_sched = params.backend_sched;
|
|
528
|
-
result->ctx_compute = params.ctx_compute;
|
|
529
|
-
result->loss_type = params.loss_type;
|
|
530
|
-
result->build_type = params.build_type;
|
|
531
|
-
result->build_type_alloc = params.build_type;
|
|
532
|
-
result->inputs = params.inputs;
|
|
533
|
-
result->outputs = params.outputs;
|
|
534
|
-
result->opt_period = params.opt_period;
|
|
535
|
-
result->get_opt_pars = params.get_opt_pars;
|
|
536
|
-
result->get_opt_pars_ud = params.get_opt_pars_ud;
|
|
537
|
-
|
|
538
|
-
GGML_ASSERT(result->opt_period >= 1);
|
|
539
|
-
|
|
540
|
-
result->static_graphs = result->ctx_compute;
|
|
541
|
-
|
|
542
|
-
if (!result->static_graphs) {
|
|
543
|
-
GGML_ASSERT(!result->inputs);
|
|
544
|
-
GGML_ASSERT(!result->outputs);
|
|
545
|
-
return result;
|
|
546
|
-
}
|
|
547
|
-
|
|
548
|
-
GGML_ASSERT(result->inputs);
|
|
549
|
-
GGML_ASSERT(result->outputs);
|
|
550
|
-
|
|
551
|
-
result->gf = ggml_new_graph_custom(result->ctx_compute, GGML_DEFAULT_GRAPH_SIZE, /*grads =*/ true); // Forward pass.
|
|
552
|
-
ggml_build_forward_expand(result->gf, result->outputs);
|
|
553
|
-
|
|
554
|
-
ggml_opt_build(result);
|
|
555
|
-
|
|
556
|
-
return result;
|
|
557
|
-
}
|
|
558
|
-
|
|
559
|
-
void ggml_opt_free(ggml_opt_context_t opt_ctx) {
|
|
560
|
-
if (opt_ctx == nullptr) {
|
|
561
|
-
return;
|
|
562
|
-
}
|
|
563
|
-
ggml_backend_buffer_free(opt_ctx->buf_static);
|
|
564
|
-
ggml_backend_buffer_free(opt_ctx->buf_cpu);
|
|
565
|
-
ggml_free(opt_ctx->ctx_static);
|
|
566
|
-
ggml_free(opt_ctx->ctx_cpu);
|
|
567
|
-
delete opt_ctx;
|
|
568
|
-
}
|
|
569
|
-
|
|
570
|
-
void ggml_opt_reset(ggml_opt_context_t opt_ctx, bool optimizer) {
|
|
571
|
-
if (optimizer) {
|
|
572
|
-
ggml_graph_reset(opt_ctx->gb_opt);
|
|
573
|
-
opt_ctx->iter = 1;
|
|
574
|
-
} else {
|
|
575
|
-
ggml_graph_reset(opt_ctx->gb_grad);
|
|
576
|
-
}
|
|
577
|
-
}
|
|
578
|
-
|
|
579
|
-
bool ggml_opt_static_graphs(ggml_opt_context_t opt_ctx) {
|
|
580
|
-
return opt_ctx->static_graphs;
|
|
581
|
-
}
|
|
582
|
-
|
|
583
|
-
struct ggml_tensor * ggml_opt_inputs(ggml_opt_context_t opt_ctx) {
|
|
584
|
-
return opt_ctx->inputs;
|
|
585
|
-
}
|
|
586
|
-
|
|
587
|
-
struct ggml_tensor * ggml_opt_outputs(ggml_opt_context_t opt_ctx) {
|
|
588
|
-
return opt_ctx->outputs;
|
|
589
|
-
}
|
|
590
|
-
|
|
591
|
-
struct ggml_tensor * ggml_opt_labels(ggml_opt_context_t opt_ctx) {
|
|
592
|
-
return opt_ctx->labels;
|
|
593
|
-
}
|
|
594
|
-
|
|
595
|
-
struct ggml_tensor * ggml_opt_loss(ggml_opt_context_t opt_ctx) {
|
|
596
|
-
return opt_ctx->loss;
|
|
597
|
-
}
|
|
598
|
-
|
|
599
|
-
struct ggml_tensor * ggml_opt_pred(ggml_opt_context_t opt_ctx) {
|
|
600
|
-
return opt_ctx->pred;
|
|
601
|
-
}
|
|
602
|
-
|
|
603
|
-
struct ggml_tensor * ggml_opt_ncorrect(ggml_opt_context_t opt_ctx) {
|
|
604
|
-
return opt_ctx->ncorrect;
|
|
605
|
-
}
|
|
606
|
-
|
|
607
|
-
struct ggml_tensor * ggml_opt_grad_acc(ggml_opt_context_t opt_ctx, struct ggml_tensor * node) {
|
|
608
|
-
return ggml_graph_get_grad_acc(opt_ctx->gb_opt, node);
|
|
609
|
-
}
|
|
610
|
-
|
|
611
|
-
// ====== Optimization Result ======
|
|
612
|
-
|
|
613
|
-
ggml_opt_result_t ggml_opt_result_init() {
|
|
614
|
-
return new ggml_opt_result;
|
|
615
|
-
}
|
|
616
|
-
|
|
617
|
-
void ggml_opt_result_free(ggml_opt_result_t result) {
|
|
618
|
-
delete result;
|
|
619
|
-
}
|
|
620
|
-
|
|
621
|
-
void ggml_opt_result_reset(ggml_opt_result_t result) {
|
|
622
|
-
result->ndata = 0;
|
|
623
|
-
result->loss.clear();
|
|
624
|
-
result->pred.clear();
|
|
625
|
-
result->ncorrect = 0;
|
|
626
|
-
}
|
|
627
|
-
|
|
628
|
-
void ggml_opt_result_ndata(ggml_opt_result_t result, int64_t * ndata) {
|
|
629
|
-
*ndata = result->ndata;
|
|
630
|
-
}
|
|
631
|
-
|
|
632
|
-
void ggml_opt_result_loss(ggml_opt_result_t result, double * loss, double * unc) {
|
|
633
|
-
const int64_t nbatches = result->loss.size(); // Number of physical batches.
|
|
634
|
-
|
|
635
|
-
if (nbatches == 0) {
|
|
636
|
-
*loss = 0.0;
|
|
637
|
-
*unc = NAN;
|
|
638
|
-
return;
|
|
639
|
-
}
|
|
640
|
-
|
|
641
|
-
double sum = 0.0;
|
|
642
|
-
double sum_squared = 0.0;
|
|
643
|
-
|
|
644
|
-
for (const float & loss : result->loss) {
|
|
645
|
-
// If the loss is per datapoint it was scaled by 1.0f/opt_period for each physical batch.
|
|
646
|
-
const float loss_scaled = result->loss_per_datapoint ? loss*result->opt_period : loss;
|
|
647
|
-
sum += loss_scaled;
|
|
648
|
-
sum_squared += loss_scaled*loss_scaled;
|
|
649
|
-
}
|
|
650
|
-
|
|
651
|
-
const double mean = sum/nbatches;
|
|
652
|
-
*loss = result->loss_per_datapoint ? mean : sum;
|
|
653
|
-
|
|
654
|
-
if (!unc) {
|
|
655
|
-
return;
|
|
656
|
-
}
|
|
657
|
-
|
|
658
|
-
if (nbatches < 2) {
|
|
659
|
-
*unc = NAN;
|
|
660
|
-
return;
|
|
661
|
-
}
|
|
662
|
-
|
|
663
|
-
const double var_sum = sum_squared/nbatches - mean*mean; // variance without Bessel's correction, i.e. nbatches/(nbatches-1)
|
|
664
|
-
*unc = result->loss_per_datapoint ? sqrt(var_sum / (nbatches - 1)) : sqrt(var_sum * nbatches/(nbatches - 1));
|
|
665
|
-
}
|
|
666
|
-
|
|
667
|
-
void ggml_opt_result_pred(ggml_opt_result_t result, int32_t * pred) {
|
|
668
|
-
for (size_t i = 0; i < result->pred.size(); ++i) {
|
|
669
|
-
pred[i] = result->pred[i];
|
|
670
|
-
}
|
|
671
|
-
}
|
|
672
|
-
|
|
673
|
-
void ggml_opt_result_accuracy(ggml_opt_result_t result, double * accuracy, double * unc) {
|
|
674
|
-
*accuracy = result->ncorrect >= 0 ? double(result->ncorrect) / double(result->ndata) : NAN;
|
|
675
|
-
|
|
676
|
-
if (!unc) {
|
|
677
|
-
return;
|
|
678
|
-
}
|
|
679
|
-
|
|
680
|
-
*unc = result->ncorrect >= 0 && result->ndata >= 2 ?
|
|
681
|
-
sqrt((*accuracy) * (1.0 - (*accuracy)) / double(result->ndata - 1)) : NAN;
|
|
682
|
-
}
|
|
683
|
-
|
|
684
|
-
// ====== Computation ======
|
|
685
|
-
|
|
686
|
-
void ggml_opt_prepare_alloc(
|
|
687
|
-
ggml_opt_context_t opt_ctx,
|
|
688
|
-
struct ggml_context * ctx_compute,
|
|
689
|
-
struct ggml_cgraph * gf,
|
|
690
|
-
struct ggml_tensor * inputs,
|
|
691
|
-
struct ggml_tensor * outputs) {
|
|
692
|
-
GGML_ASSERT(!opt_ctx->static_graphs);
|
|
693
|
-
opt_ctx->ctx_compute = ctx_compute;
|
|
694
|
-
opt_ctx->gf = gf;
|
|
695
|
-
opt_ctx->inputs = inputs;
|
|
696
|
-
opt_ctx->outputs = outputs;
|
|
697
|
-
}
|
|
698
|
-
|
|
699
|
-
void ggml_opt_alloc(ggml_opt_context_t opt_ctx, bool backward) {
|
|
700
|
-
GGML_ASSERT(!opt_ctx->eval_ready);
|
|
701
|
-
if (opt_ctx->build_type == GGML_OPT_BUILD_TYPE_OPT && opt_ctx->opt_period > 1 && opt_ctx->opt_i == 0) {
|
|
702
|
-
ggml_graph_reset(opt_ctx->gb_grad);
|
|
703
|
-
}
|
|
704
|
-
if (backward) {
|
|
705
|
-
const int32_t opt_i_next = (opt_ctx->opt_i + 1) % opt_ctx->opt_period;
|
|
706
|
-
opt_ctx->build_type = opt_i_next == 0 ? GGML_OPT_BUILD_TYPE_OPT : GGML_OPT_BUILD_TYPE_GRAD;
|
|
707
|
-
} else {
|
|
708
|
-
opt_ctx->build_type = GGML_OPT_BUILD_TYPE_FORWARD;
|
|
709
|
-
}
|
|
710
|
-
|
|
711
|
-
if (!opt_ctx->static_graphs) {
|
|
712
|
-
ggml_opt_build(opt_ctx);
|
|
713
|
-
}
|
|
714
|
-
|
|
715
|
-
struct ggml_cgraph * graph = nullptr;
|
|
716
|
-
switch (opt_ctx->build_type) {
|
|
717
|
-
case GGML_OPT_BUILD_TYPE_FORWARD: {
|
|
718
|
-
graph = opt_ctx->gf;
|
|
719
|
-
} break;
|
|
720
|
-
case GGML_OPT_BUILD_TYPE_GRAD: {
|
|
721
|
-
graph = opt_ctx->gb_grad;
|
|
722
|
-
} break;
|
|
723
|
-
case GGML_OPT_BUILD_TYPE_OPT: {
|
|
724
|
-
graph = opt_ctx->gb_opt;
|
|
725
|
-
} break;
|
|
726
|
-
}
|
|
727
|
-
GGML_ASSERT(graph);
|
|
728
|
-
|
|
729
|
-
if (opt_ctx->allocated_graph == graph) {
|
|
730
|
-
opt_ctx->eval_ready = true;
|
|
731
|
-
return;
|
|
732
|
-
}
|
|
733
|
-
|
|
734
|
-
ggml_backend_sched_reset(opt_ctx->backend_sched); // clear allocation of previous graph
|
|
735
|
-
|
|
736
|
-
if (opt_ctx->static_graphs) {
|
|
737
|
-
ggml_init_params params = {
|
|
738
|
-
/*.mem_size =*/ graph->size*ggml_tensor_overhead() + ggml_graph_overhead_custom(graph->size, graph->grads),
|
|
739
|
-
/*.mem_buffer =*/ nullptr,
|
|
740
|
-
/*.no_alloc =*/ true,
|
|
741
|
-
};
|
|
742
|
-
ggml_free(opt_ctx->ctx_copy);
|
|
743
|
-
opt_ctx->ctx_copy = ggml_init(params);
|
|
744
|
-
|
|
745
|
-
opt_ctx->allocated_graph_copy = dup_graph(opt_ctx->ctx_copy, graph);
|
|
746
|
-
} else {
|
|
747
|
-
opt_ctx->allocated_graph_copy = graph;
|
|
748
|
-
}
|
|
749
|
-
|
|
750
|
-
ggml_backend_sched_alloc_graph(opt_ctx->backend_sched, opt_ctx->allocated_graph_copy);
|
|
751
|
-
opt_ctx->allocated_graph = graph;
|
|
752
|
-
|
|
753
|
-
opt_ctx->eval_ready = true;
|
|
754
|
-
}
|
|
755
|
-
-void ggml_opt_eval(ggml_opt_context_t opt_ctx, ggml_opt_result_t result) {
-    GGML_ASSERT(opt_ctx->eval_ready);
-    if (opt_ctx->allocated_graph == opt_ctx->gb_opt) {
-        struct ggml_opt_optimizer_params opt_pars = opt_ctx->get_opt_pars(opt_ctx->get_opt_pars_ud);
-
-        GGML_ASSERT(opt_pars.adamw.alpha > 0.0f);
-        GGML_ASSERT(opt_pars.adamw.beta1 >= 0.0f);
-        GGML_ASSERT(opt_pars.adamw.beta1 <= 1.0f);
-        GGML_ASSERT(opt_pars.adamw.beta2 >= 0.0f);
-        GGML_ASSERT(opt_pars.adamw.beta2 <= 1.0f);
-        GGML_ASSERT(opt_pars.adamw.eps >= 0.0f);
-        GGML_ASSERT(opt_pars.adamw.wd >= 0.0f);
-        GGML_ASSERT(opt_pars.adamw.wd <= 1.0f);
-
-        // beta1, beta2 after applying warmup
-        const float beta1h = 1.0f/(1.0f - powf(opt_pars.adamw.beta1, opt_ctx->iter));
-        const float beta2h = 1.0f/(1.0f - powf(opt_pars.adamw.beta2, opt_ctx->iter));
-
-        float * adamw_par_data = ggml_get_data_f32(opt_ctx->adamw_params);
-        adamw_par_data[0] = opt_pars.adamw.alpha;
-        adamw_par_data[1] = opt_pars.adamw.beta1;
-        adamw_par_data[2] = opt_pars.adamw.beta2;
-        adamw_par_data[3] = opt_pars.adamw.eps;
-        adamw_par_data[4] = opt_pars.adamw.wd;
-        adamw_par_data[5] = beta1h;
-        adamw_par_data[6] = beta2h;
-    }
-
-    ggml_backend_sched_graph_compute(opt_ctx->backend_sched, opt_ctx->allocated_graph_copy);
-    opt_ctx->iter += opt_ctx->allocated_graph == opt_ctx->gb_opt;
-    opt_ctx->opt_i = (opt_ctx->opt_i + 1) % opt_ctx->opt_period;
-
-    if (!opt_ctx->static_graphs) {
-        opt_ctx->gf = nullptr;
-        opt_ctx->gb_grad = nullptr;
-        opt_ctx->gb_opt = nullptr;
-        opt_ctx->allocated_graph = nullptr;
-        opt_ctx->allocated_graph_copy = nullptr;
-    }
-
-    opt_ctx->eval_ready = false;
-
-    if (!result) {
-        return;
-    }
-
-    if (result->ndata == 0) {
-        result->loss_per_datapoint = opt_ctx->loss_per_datapoint;
-        result->opt_period = opt_ctx->opt_period;
-    } else {
-        GGML_ASSERT(result->loss_per_datapoint == opt_ctx->loss_per_datapoint);
-        GGML_ASSERT(result->opt_period == opt_ctx->opt_period);
-    }
-
-    const int64_t ndata = opt_ctx->outputs->ne[1];
-    GGML_ASSERT(result->ndata == ndata*int64_t(result->loss.size()) && "varying batch size not supported");
-    result->ndata += ndata;
-
-    GGML_ASSERT(ggml_is_scalar(opt_ctx->loss));
-    GGML_ASSERT(opt_ctx->loss->type == GGML_TYPE_F32);
-    float loss;
-    ggml_backend_tensor_get(opt_ctx->loss, &loss, 0, ggml_nbytes(opt_ctx->loss));
-    result->loss.push_back(loss);
-
-    if (opt_ctx->pred) {
-        GGML_ASSERT(opt_ctx->pred->type == GGML_TYPE_I32);
-        std::vector<int32_t> pred(ndata);
-        ggml_backend_tensor_get(opt_ctx->pred, pred.data(), 0, ggml_nbytes(opt_ctx->pred));
-        result->pred.insert(result->pred.end(), pred.begin(), pred.end());
-    }
-
-    if (!opt_ctx->ncorrect || result->ncorrect < 0) {
-        result->ncorrect = -1;
-        return;
-    }
-
-    GGML_ASSERT(ggml_is_scalar(opt_ctx->ncorrect));
-    GGML_ASSERT(opt_ctx->ncorrect->type == GGML_TYPE_I64);
-    int64_t ncorrect;
-    ggml_backend_tensor_get(opt_ctx->ncorrect, &ncorrect, 0, ggml_nbytes(opt_ctx->ncorrect));
-    result->ncorrect += ncorrect;
-}
-
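Note on the removed ggml_opt_eval above: beta1h and beta2h are the standard Adam bias-correction factors for iteration t = opt_ctx->iter,

\[
\beta_{1h} = \frac{1}{1-\beta_1^{\,t}}, \qquad
\beta_{2h} = \frac{1}{1-\beta_2^{\,t}},
\]

and are uploaded together with alpha, beta1, beta2, eps and wd into the seven-element adamw_params tensor; the optimizer-step operation that consumes them is built elsewhere and is not part of this hunk.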
-// ====== High-Level Functions ======
-
-void ggml_opt_epoch(
-        ggml_opt_context_t opt_ctx,
-        ggml_opt_dataset_t dataset,
-        ggml_opt_result_t result_train,
-        ggml_opt_result_t result_eval,
-        int64_t idata_split,
-        ggml_opt_epoch_callback callback_train,
-        ggml_opt_epoch_callback callback_eval) {
-    GGML_ASSERT(ggml_opt_static_graphs(opt_ctx) && "ggml_opt_epoch requires static graphs");
-    struct ggml_tensor * inputs = ggml_opt_inputs(opt_ctx);
-    struct ggml_tensor * labels = ggml_opt_labels(opt_ctx);
-    struct ggml_tensor * data = ggml_opt_dataset_data(dataset);
-    GGML_ASSERT(data->ne[0] == inputs->ne[0]);
-
-    const int64_t ndata = data->ne[1];
-    const int64_t ndata_batch = inputs->ne[1];
-
-    GGML_ASSERT(data->ne[1] % inputs->ne[1] == 0);
-    const int64_t nbatches = ndata/ndata_batch;
-
-    idata_split = idata_split < 0 ? ndata : idata_split;
-    GGML_ASSERT(idata_split % ndata_batch == 0);
-    const int64_t ibatch_split = idata_split / ndata_batch;
-
-    int64_t ibatch = 0;
-    int64_t t_loop_start = ggml_time_us();
-    for (; ibatch < ibatch_split; ++ibatch) {
-        ggml_opt_alloc(opt_ctx, /*backward =*/ true);
-        ggml_opt_dataset_get_batch(dataset, inputs, labels, ibatch);
-        ggml_opt_eval(opt_ctx, result_train);
-        if (callback_train) {
-            callback_train(true, opt_ctx, dataset, result_train, ibatch+1, ibatch_split, t_loop_start);
-        }
-    }
-    t_loop_start = ggml_time_us();
-    for (; ibatch < nbatches; ++ibatch) {
-        ggml_opt_alloc(opt_ctx, /*backward =*/ false);
-        ggml_opt_dataset_get_batch(dataset, inputs, labels, ibatch);
-        ggml_opt_eval(opt_ctx, result_eval);
-        if (callback_eval) {
-            callback_eval(false, opt_ctx, dataset, result_eval, ibatch+1-ibatch_split, nbatches-ibatch_split, t_loop_start);
-        }
-    }
-}
-
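Note on the removed ggml_opt_epoch above: idata_split separates the training and evaluation portions of a single pass over the dataset. With hypothetical values ndata = 1000, ndata_batch = 100 and idata_split = 900, ibatch_split = 9, so batches 0 through 8 run with a backward pass and accumulate into result_train, while batch 9 runs forward-only into result_eval; passing idata_split < 0 trains on the entire dataset.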
-void ggml_opt_epoch_callback_progress_bar(
-        bool train,
-        ggml_opt_context_t opt_ctx,
-        ggml_opt_dataset_t dataset,
-        ggml_opt_result_t result,
-        int64_t ibatch,
-        int64_t ibatch_max,
-        int64_t t_start_us) {
-    fprintf(stderr, "%s[", train ? "train: " : "val: ");
-
-    // The progress bar consists of partially filled blocks, unicode has 8 separate fill levels.
-    constexpr int64_t bar_length = 8;
-    const int64_t ibatch8 = 8 * ibatch;
-    for (int64_t j = 0; j < bar_length; ++j) {
-        if (ibatch_max * (8*j + 8) / bar_length < ibatch8) {
-            fprintf(stderr, "\u2588"); // full block
-        } else if (ibatch_max * (8*j + 7) / bar_length < ibatch8) {
-            fprintf(stderr, "\u2589"); // 7/8 filled
-        } else if (ibatch_max * (8*j + 6) / bar_length < ibatch8) {
-            fprintf(stderr, "\u258A"); // 6/8 filled
-        } else if (ibatch_max * (8*j + 5) / bar_length < ibatch8) {
-            fprintf(stderr, "\u258B"); // 5/8 filled
-        } else if (ibatch_max * (8*j + 4) / bar_length < ibatch8) {
-            fprintf(stderr, "\u258C"); // 4/8 filled
-        } else if (ibatch_max * (8*j + 3) / bar_length < ibatch8) {
-            fprintf(stderr, "\u258D"); // 3/8 filled
-        } else if (ibatch_max * (8*j + 2) / bar_length < ibatch8) {
-            fprintf(stderr, "\u258E"); // 2/8 filled
-        } else if (ibatch_max * (8*j + 1) / bar_length < ibatch8) {
-            fprintf(stderr, "\u258F"); // 1/8 filled
-        } else {
-            fprintf(stderr, " ");
-        }
-    }
-
-    const int64_t batch_size = ggml_opt_inputs(opt_ctx)->ne[1];
-    const int64_t idata = ibatch*batch_size;
-    const int64_t idata_max = ibatch_max*batch_size;
-
-    double loss;
-    double loss_unc;
-    ggml_opt_result_loss(result, &loss, &loss_unc);
-
-    double accuracy;
-    double accuracy_unc;
-    ggml_opt_result_accuracy(result, &accuracy, &accuracy_unc);
-
-    const int64_t t_ibatch_us = ggml_time_us() - t_start_us;
-    int64_t t_ibatch_s = t_ibatch_us / 1000000;
-    const int64_t t_ibatch_h = t_ibatch_s / 3600;
-    t_ibatch_s -= t_ibatch_h * 3600;
-    const int64_t t_ibatch_m = t_ibatch_s / 60;
-    t_ibatch_s -= t_ibatch_m * 60;
-
-    const int64_t t_eta_us = t_ibatch_us * (ibatch_max - ibatch)/ibatch;
-    int64_t t_eta_s = t_eta_us / 1000000;
-    const int64_t t_eta_h = t_eta_s / 3600;
-    t_eta_s -= t_eta_h * 3600;
-    const int64_t t_eta_m = t_eta_s / 60;
-    t_eta_s -= t_eta_m * 60;
-
-    fprintf(stderr, "] data=%07" PRId64 "/%07" PRId64 " loss=%.5lf±%.5lf acc=%.2lf±%.2lf%% "
-            "t=%02" PRId64 ":%02" PRId64 ":%02" PRId64 " ETA=%02" PRId64 ":%02" PRId64 ":%02" PRId64 " \r",
-            idata, idata_max, loss, loss_unc, 100.0*accuracy, 100.0*accuracy_unc,
-            t_ibatch_h, t_ibatch_m, t_ibatch_s, t_eta_h, t_eta_m, t_eta_s);
-    if (ibatch == ibatch_max) {
-        fprintf(stderr, "\n");
-    }
-    fflush(stderr);
-
-    GGML_UNUSED(dataset);
-}
-
-void ggml_opt_fit(
-        ggml_backend_sched_t backend_sched,
-        ggml_context * ctx_compute,
-        ggml_tensor * inputs,
-        ggml_tensor * outputs,
-        ggml_opt_dataset_t dataset,
-        enum ggml_opt_loss_type loss_type,
-        ggml_opt_get_optimizer_params get_opt_pars,
-        int64_t nepoch,
-        int64_t nbatch_logical,
-        float val_split,
-        bool silent) {
-    ggml_time_init();
-    const int64_t t_start_us = ggml_time_us();
-
-    const int64_t ndata = ggml_opt_dataset_data(dataset)->ne[1];
-    const int64_t nbatch_physical = inputs->ne[1];
-    GGML_ASSERT(ndata % nbatch_logical == 0);
-    GGML_ASSERT(nbatch_logical % nbatch_physical == 0);
-
-    const int64_t opt_period = nbatch_logical / nbatch_physical;
-    const int64_t nbatches_logical = ndata / nbatch_logical;
-
-    GGML_ASSERT(val_split >= 0.0f);
-    GGML_ASSERT(val_split < 1.0f);
-    const int64_t ibatch_split = int64_t(((1.0f - val_split) * nbatches_logical)) * opt_period; // train <-> val split index (physical)
-    const int64_t idata_split = ibatch_split * nbatch_physical;
-
-    int64_t epoch = 1;
-
-    ggml_opt_params params = ggml_opt_default_params(backend_sched, loss_type);
-    params.ctx_compute = ctx_compute;
-    params.inputs = inputs;
-    params.outputs = outputs;
-    params.opt_period = opt_period;
-    params.get_opt_pars = get_opt_pars;
-    params.get_opt_pars_ud = &epoch;
-    ggml_opt_context_t opt_ctx = ggml_opt_init(params);
-
-    // Shuffling the data is generally useful but there is only a point if not all data is used in a single batch.
-    if (nbatch_logical < ndata) {
-        ggml_opt_dataset_shuffle(opt_ctx, dataset, -1); // Shuffle all data (train + validation).
-    }
-
-    ggml_opt_result_t result_train = ggml_opt_result_init();
-    ggml_opt_result_t result_val = ggml_opt_result_init();
-
-    ggml_opt_epoch_callback epoch_callback = silent ? nullptr : ggml_opt_epoch_callback_progress_bar;
-
-    for (; epoch <= nepoch; ++epoch) {
-        if (nbatch_logical < idata_split) {
-            ggml_opt_dataset_shuffle(opt_ctx, dataset, idata_split);
-        }
-
-        ggml_opt_result_reset(result_train);
-        ggml_opt_result_reset(result_val);
-
-        if (!silent) {
-            fprintf(stderr, "%s: epoch %04" PRId64 "/%04" PRId64 ":\n", __func__, epoch, nepoch);
-        }
-        ggml_opt_epoch(opt_ctx, dataset, result_train, result_val, idata_split, epoch_callback, epoch_callback);
-        if (!silent) {
-            fprintf(stderr, "\n");
-        }
-    }
-
-    if (!silent) {
-        int64_t t_total_s = (ggml_time_us() - t_start_us) / 1000000;
-        const int64_t t_total_h = t_total_s / 3600;
-        t_total_s -= t_total_h * 3600;
-        const int64_t t_total_m = t_total_s / 60;
-        t_total_s -= t_total_m * 60;
-        fprintf(stderr, "%s: training took %02" PRId64 ":%02" PRId64 ":%02" PRId64 "\n", __func__, t_total_h, t_total_m, t_total_s);
-    }
-
-    ggml_opt_free(opt_ctx);
-    ggml_opt_result_free(result_train);
-    ggml_opt_result_free(result_val);
-}
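For orientation, a minimal sketch of how the removed ggml_opt_fit entry point was typically driven. It is illustrative only: backend_sched, ctx_compute, inputs, outputs and dataset are assumed to have been created elsewhere, and the header and loss-type constant names are taken from ggml-opt.h as shipped with this llama.cpp snapshot.

#include "ggml-opt.h" // assumed header providing ggml_opt_fit and related types

// Optimizer-parameter callback matching the ggml_opt_get_optimizer_params shape used
// above: it receives get_opt_pars_ud (ggml_opt_fit passes the address of the current
// epoch) and returns the AdamW hyperparameters for that step.
static ggml_opt_optimizer_params my_opt_pars(void * userdata) {
    const int64_t epoch = *(const int64_t *) userdata;

    ggml_opt_optimizer_params pars{};
    pars.adamw.alpha = epoch <= 1 ? 1e-4f : 1e-5f; // e.g. lower the learning rate after epoch 1
    pars.adamw.beta1 = 0.9f;
    pars.adamw.beta2 = 0.999f;
    pars.adamw.eps   = 1e-8f;
    pars.adamw.wd    = 0.0f; // no weight decay
    return pars;
}

// Train for 4 epochs with a logical batch of 64 datapoints and roughly 10% of the data
// held out for validation (the assert in ggml_opt_fit requires ndata % 64 == 0).
static void train_example(ggml_backend_sched_t backend_sched, ggml_context * ctx_compute,
                          ggml_tensor * inputs, ggml_tensor * outputs, ggml_opt_dataset_t dataset) {
    ggml_opt_fit(backend_sched, ctx_compute, inputs, outputs, dataset,
                 GGML_OPT_LOSS_TYPE_CROSS_ENTROPY, my_opt_pars,
                 /*nepoch         =*/ 4,
                 /*nbatch_logical =*/ 64,
                 /*val_split      =*/ 0.1f,
                 /*silent         =*/ false);
}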