@novastera-oss/llamarn 0.0.1-alpha.4
This diff represents the content of publicly available package versions released to one of the supported registries. It is provided for informational purposes only and reflects the changes between package versions as they appear in their respective public registries.
- package/INTERFACE.md +389 -0
- package/LICENSE +201 -0
- package/README.md +235 -0
- package/RNLlamaCpp.podspec +69 -0
- package/android/CMakeLists.txt +107 -0
- package/android/build.gradle +111 -0
- package/android/generated/java/com/novastera/llamarn/NativeRNLlamaCppSpec.java +47 -0
- package/android/generated/jni/CMakeLists.txt +36 -0
- package/android/generated/jni/RNLlamaCppSpec-generated.cpp +44 -0
- package/android/generated/jni/RNLlamaCppSpec.h +31 -0
- package/android/generated/jni/react/renderer/components/RNLlamaCppSpec/RNLlamaCppSpecJSI-generated.cpp +42 -0
- package/android/generated/jni/react/renderer/components/RNLlamaCppSpec/RNLlamaCppSpecJSI.h +336 -0
- package/android/gradle.properties +5 -0
- package/android/src/main/AndroidManifest.xml +3 -0
- package/android/src/main/AndroidManifestNew.xml +2 -0
- package/android/src/main/cpp/include/llama-cpp.h +30 -0
- package/android/src/main/cpp/include/llama.h +1440 -0
- package/android/src/main/java/com/novastera/llamarn/RNLlamaCppPackage.kt +21 -0
- package/android/src/main/jniLibs/arm64-v8a/libOpenCL.so +0 -0
- package/android/src/main/jniLibs/arm64-v8a/libggml-base.so +0 -0
- package/android/src/main/jniLibs/arm64-v8a/libggml-cpu.so +0 -0
- package/android/src/main/jniLibs/arm64-v8a/libggml.so +0 -0
- package/android/src/main/jniLibs/arm64-v8a/libllama.so +0 -0
- package/android/src/main/jniLibs/x86_64/libOpenCL.so +0 -0
- package/android/src/main/jniLibs/x86_64/libggml-base.so +0 -0
- package/android/src/main/jniLibs/x86_64/libggml-cpu.so +0 -0
- package/android/src/main/jniLibs/x86_64/libggml.so +0 -0
- package/android/src/main/jniLibs/x86_64/libllama.so +0 -0
- package/cpp/LlamaCppModel.cpp +984 -0
- package/cpp/LlamaCppModel.h +162 -0
- package/cpp/PureCppImpl.cpp +308 -0
- package/cpp/PureCppImpl.h +59 -0
- package/cpp/SystemUtils.cpp +180 -0
- package/cpp/SystemUtils.h +74 -0
- package/cpp/build-info.cpp +4 -0
- package/cpp/llama.cpp/AUTHORS +1106 -0
- package/cpp/llama.cpp/CMakeLists.txt +254 -0
- package/cpp/llama.cpp/CMakePresets.json +84 -0
- package/cpp/llama.cpp/CODEOWNERS +11 -0
- package/cpp/llama.cpp/CONTRIBUTING.md +127 -0
- package/cpp/llama.cpp/LICENSE +21 -0
- package/cpp/llama.cpp/Makefile +1608 -0
- package/cpp/llama.cpp/README.md +575 -0
- package/cpp/llama.cpp/SECURITY.md +68 -0
- package/cpp/llama.cpp/build-xcframework.sh +540 -0
- package/cpp/llama.cpp/cmake/arm64-apple-clang.cmake +16 -0
- package/cpp/llama.cpp/cmake/arm64-windows-llvm.cmake +16 -0
- package/cpp/llama.cpp/cmake/build-info.cmake +64 -0
- package/cpp/llama.cpp/cmake/common.cmake +35 -0
- package/cpp/llama.cpp/cmake/git-vars.cmake +22 -0
- package/cpp/llama.cpp/cmake/llama-config.cmake.in +30 -0
- package/cpp/llama.cpp/cmake/llama.pc.in +10 -0
- package/cpp/llama.cpp/cmake/x64-windows-llvm.cmake +5 -0
- package/cpp/llama.cpp/common/CMakeLists.txt +170 -0
- package/cpp/llama.cpp/common/arg.cpp +3337 -0
- package/cpp/llama.cpp/common/arg.h +89 -0
- package/cpp/llama.cpp/common/base64.hpp +392 -0
- package/cpp/llama.cpp/common/build-info.cpp.in +4 -0
- package/cpp/llama.cpp/common/chat.cpp +1781 -0
- package/cpp/llama.cpp/common/chat.h +135 -0
- package/cpp/llama.cpp/common/cmake/build-info-gen-cpp.cmake +24 -0
- package/cpp/llama.cpp/common/common.cpp +1567 -0
- package/cpp/llama.cpp/common/common.h +668 -0
- package/cpp/llama.cpp/common/console.cpp +504 -0
- package/cpp/llama.cpp/common/console.h +19 -0
- package/cpp/llama.cpp/common/json-schema-to-grammar.cpp +1027 -0
- package/cpp/llama.cpp/common/json-schema-to-grammar.h +21 -0
- package/cpp/llama.cpp/common/json.hpp +24766 -0
- package/cpp/llama.cpp/common/llguidance.cpp +254 -0
- package/cpp/llama.cpp/common/log.cpp +393 -0
- package/cpp/llama.cpp/common/log.h +103 -0
- package/cpp/llama.cpp/common/minja/chat-template.hpp +537 -0
- package/cpp/llama.cpp/common/minja/minja.hpp +2941 -0
- package/cpp/llama.cpp/common/ngram-cache.cpp +286 -0
- package/cpp/llama.cpp/common/ngram-cache.h +101 -0
- package/cpp/llama.cpp/common/sampling.cpp +580 -0
- package/cpp/llama.cpp/common/sampling.h +107 -0
- package/cpp/llama.cpp/common/speculative.cpp +278 -0
- package/cpp/llama.cpp/common/speculative.h +28 -0
- package/cpp/llama.cpp/common/stb_image.h +7988 -0
- package/cpp/llama.cpp/convert_hf_to_gguf.py +6195 -0
- package/cpp/llama.cpp/convert_hf_to_gguf_update.py +393 -0
- package/cpp/llama.cpp/convert_llama_ggml_to_gguf.py +450 -0
- package/cpp/llama.cpp/convert_lora_to_gguf.py +461 -0
- package/cpp/llama.cpp/flake.lock +58 -0
- package/cpp/llama.cpp/flake.nix +185 -0
- package/cpp/llama.cpp/ggml/CMakeLists.txt +388 -0
- package/cpp/llama.cpp/ggml/cmake/GitVars.cmake +22 -0
- package/cpp/llama.cpp/ggml/cmake/common.cmake +26 -0
- package/cpp/llama.cpp/ggml/cmake/ggml-config.cmake.in +152 -0
- package/cpp/llama.cpp/ggml/include/ggml-alloc.h +76 -0
- package/cpp/llama.cpp/ggml/include/ggml-backend.h +354 -0
- package/cpp/llama.cpp/ggml/include/ggml-blas.h +25 -0
- package/cpp/llama.cpp/ggml/include/ggml-cann.h +123 -0
- package/cpp/llama.cpp/ggml/include/ggml-cpp.h +39 -0
- package/cpp/llama.cpp/ggml/include/ggml-cpu.h +143 -0
- package/cpp/llama.cpp/ggml/include/ggml-cuda.h +47 -0
- package/cpp/llama.cpp/ggml/include/ggml-kompute.h +50 -0
- package/cpp/llama.cpp/ggml/include/ggml-metal.h +66 -0
- package/cpp/llama.cpp/ggml/include/ggml-opencl.h +26 -0
- package/cpp/llama.cpp/ggml/include/ggml-opt.h +216 -0
- package/cpp/llama.cpp/ggml/include/ggml-rpc.h +33 -0
- package/cpp/llama.cpp/ggml/include/ggml-sycl.h +49 -0
- package/cpp/llama.cpp/ggml/include/ggml-vulkan.h +29 -0
- package/cpp/llama.cpp/ggml/include/ggml.h +2192 -0
- package/cpp/llama.cpp/ggml/include/gguf.h +202 -0
- package/cpp/llama.cpp/ggml/src/CMakeLists.txt +345 -0
- package/cpp/llama.cpp/ggml/src/ggml-alloc.c +1042 -0
- package/cpp/llama.cpp/ggml/src/ggml-backend-impl.h +255 -0
- package/cpp/llama.cpp/ggml/src/ggml-backend-reg.cpp +586 -0
- package/cpp/llama.cpp/ggml/src/ggml-backend.cpp +2008 -0
- package/cpp/llama.cpp/ggml/src/ggml-blas/CMakeLists.txt +87 -0
- package/cpp/llama.cpp/ggml/src/ggml-blas/ggml-blas.cpp +517 -0
- package/cpp/llama.cpp/ggml/src/ggml-cann/CMakeLists.txt +74 -0
- package/cpp/llama.cpp/ggml/src/ggml-cann/Doxyfile +2579 -0
- package/cpp/llama.cpp/ggml/src/ggml-cann/acl_tensor.cpp +179 -0
- package/cpp/llama.cpp/ggml/src/ggml-cann/acl_tensor.h +258 -0
- package/cpp/llama.cpp/ggml/src/ggml-cann/aclnn_ops.cpp +2589 -0
- package/cpp/llama.cpp/ggml/src/ggml-cann/aclnn_ops.h +1083 -0
- package/cpp/llama.cpp/ggml/src/ggml-cann/common.h +420 -0
- package/cpp/llama.cpp/ggml/src/ggml-cann/ggml-cann.cpp +2554 -0
- package/cpp/llama.cpp/ggml/src/ggml-common.h +1857 -0
- package/cpp/llama.cpp/ggml/src/ggml-cpu/CMakeLists.txt +495 -0
- package/cpp/llama.cpp/ggml/src/ggml-cpu/amx/amx.cpp +221 -0
- package/cpp/llama.cpp/ggml/src/ggml-cpu/amx/amx.h +8 -0
- package/cpp/llama.cpp/ggml/src/ggml-cpu/amx/common.h +91 -0
- package/cpp/llama.cpp/ggml/src/ggml-cpu/amx/mmq.cpp +2511 -0
- package/cpp/llama.cpp/ggml/src/ggml-cpu/amx/mmq.h +10 -0
- package/cpp/llama.cpp/ggml/src/ggml-cpu/binary-ops.cpp +158 -0
- package/cpp/llama.cpp/ggml/src/ggml-cpu/binary-ops.h +16 -0
- package/cpp/llama.cpp/ggml/src/ggml-cpu/cmake/FindSIMD.cmake +100 -0
- package/cpp/llama.cpp/ggml/src/ggml-cpu/common.h +72 -0
- package/cpp/llama.cpp/ggml/src/ggml-cpu/cpu-feats-x86.cpp +327 -0
- package/cpp/llama.cpp/ggml/src/ggml-cpu/ggml-cpu-aarch64.cpp +6431 -0
- package/cpp/llama.cpp/ggml/src/ggml-cpu/ggml-cpu-aarch64.h +8 -0
- package/cpp/llama.cpp/ggml/src/ggml-cpu/ggml-cpu-hbm.cpp +55 -0
- package/cpp/llama.cpp/ggml/src/ggml-cpu/ggml-cpu-hbm.h +8 -0
- package/cpp/llama.cpp/ggml/src/ggml-cpu/ggml-cpu-impl.h +512 -0
- package/cpp/llama.cpp/ggml/src/ggml-cpu/ggml-cpu-quants.c +13131 -0
- package/cpp/llama.cpp/ggml/src/ggml-cpu/ggml-cpu-quants.h +63 -0
- package/cpp/llama.cpp/ggml/src/ggml-cpu/ggml-cpu-traits.cpp +36 -0
- package/cpp/llama.cpp/ggml/src/ggml-cpu/ggml-cpu-traits.h +38 -0
- package/cpp/llama.cpp/ggml/src/ggml-cpu/ggml-cpu.c +3492 -0
- package/cpp/llama.cpp/ggml/src/ggml-cpu/ggml-cpu.cpp +671 -0
- package/cpp/llama.cpp/ggml/src/ggml-cpu/kleidiai/kernels.cpp +254 -0
- package/cpp/llama.cpp/ggml/src/ggml-cpu/kleidiai/kernels.h +60 -0
- package/cpp/llama.cpp/ggml/src/ggml-cpu/kleidiai/kleidiai.cpp +287 -0
- package/cpp/llama.cpp/ggml/src/ggml-cpu/kleidiai/kleidiai.h +17 -0
- package/cpp/llama.cpp/ggml/src/ggml-cpu/llamafile/sgemm.cpp +3544 -0
- package/cpp/llama.cpp/ggml/src/ggml-cpu/llamafile/sgemm.h +14 -0
- package/cpp/llama.cpp/ggml/src/ggml-cpu/ops.cpp +8796 -0
- package/cpp/llama.cpp/ggml/src/ggml-cpu/ops.h +110 -0
- package/cpp/llama.cpp/ggml/src/ggml-cpu/simd-mappings.h +892 -0
- package/cpp/llama.cpp/ggml/src/ggml-cpu/unary-ops.cpp +186 -0
- package/cpp/llama.cpp/ggml/src/ggml-cpu/unary-ops.h +28 -0
- package/cpp/llama.cpp/ggml/src/ggml-cpu/vec.cpp +252 -0
- package/cpp/llama.cpp/ggml/src/ggml-cpu/vec.h +802 -0
- package/cpp/llama.cpp/ggml/src/ggml-cuda/CMakeLists.txt +184 -0
- package/cpp/llama.cpp/ggml/src/ggml-cuda/acc.cu +47 -0
- package/cpp/llama.cpp/ggml/src/ggml-cuda/acc.cuh +5 -0
- package/cpp/llama.cpp/ggml/src/ggml-cuda/arange.cu +34 -0
- package/cpp/llama.cpp/ggml/src/ggml-cuda/arange.cuh +5 -0
- package/cpp/llama.cpp/ggml/src/ggml-cuda/argmax.cu +91 -0
- package/cpp/llama.cpp/ggml/src/ggml-cuda/argmax.cuh +3 -0
- package/cpp/llama.cpp/ggml/src/ggml-cuda/argsort.cu +104 -0
- package/cpp/llama.cpp/ggml/src/ggml-cuda/argsort.cuh +3 -0
- package/cpp/llama.cpp/ggml/src/ggml-cuda/binbcast.cu +363 -0
- package/cpp/llama.cpp/ggml/src/ggml-cuda/binbcast.cuh +9 -0
- package/cpp/llama.cpp/ggml/src/ggml-cuda/clamp.cu +45 -0
- package/cpp/llama.cpp/ggml/src/ggml-cuda/clamp.cuh +5 -0
- package/cpp/llama.cpp/ggml/src/ggml-cuda/common.cuh +828 -0
- package/cpp/llama.cpp/ggml/src/ggml-cuda/concat.cu +221 -0
- package/cpp/llama.cpp/ggml/src/ggml-cuda/concat.cuh +5 -0
- package/cpp/llama.cpp/ggml/src/ggml-cuda/conv-transpose-1d.cu +89 -0
- package/cpp/llama.cpp/ggml/src/ggml-cuda/conv-transpose-1d.cuh +5 -0
- package/cpp/llama.cpp/ggml/src/ggml-cuda/convert.cu +730 -0
- package/cpp/llama.cpp/ggml/src/ggml-cuda/convert.cuh +26 -0
- package/cpp/llama.cpp/ggml/src/ggml-cuda/count-equal.cu +64 -0
- package/cpp/llama.cpp/ggml/src/ggml-cuda/count-equal.cuh +5 -0
- package/cpp/llama.cpp/ggml/src/ggml-cuda/cp-async.cuh +57 -0
- package/cpp/llama.cpp/ggml/src/ggml-cuda/cpy.cu +695 -0
- package/cpp/llama.cpp/ggml/src/ggml-cuda/cpy.cuh +11 -0
- package/cpp/llama.cpp/ggml/src/ggml-cuda/cross-entropy-loss.cu +189 -0
- package/cpp/llama.cpp/ggml/src/ggml-cuda/cross-entropy-loss.cuh +7 -0
- package/cpp/llama.cpp/ggml/src/ggml-cuda/dequantize.cuh +103 -0
- package/cpp/llama.cpp/ggml/src/ggml-cuda/diagmask.cu +40 -0
- package/cpp/llama.cpp/ggml/src/ggml-cuda/diagmask.cuh +5 -0
- package/cpp/llama.cpp/ggml/src/ggml-cuda/fattn-common.cuh +873 -0
- package/cpp/llama.cpp/ggml/src/ggml-cuda/fattn-mma-f16.cuh +1269 -0
- package/cpp/llama.cpp/ggml/src/ggml-cuda/fattn-tile-f16.cu +357 -0
- package/cpp/llama.cpp/ggml/src/ggml-cuda/fattn-tile-f16.cuh +3 -0
- package/cpp/llama.cpp/ggml/src/ggml-cuda/fattn-tile-f32.cu +365 -0
- package/cpp/llama.cpp/ggml/src/ggml-cuda/fattn-tile-f32.cuh +3 -0
- package/cpp/llama.cpp/ggml/src/ggml-cuda/fattn-vec-f16.cuh +437 -0
- package/cpp/llama.cpp/ggml/src/ggml-cuda/fattn-vec-f32.cuh +428 -0
- package/cpp/llama.cpp/ggml/src/ggml-cuda/fattn-wmma-f16.cu +634 -0
- package/cpp/llama.cpp/ggml/src/ggml-cuda/fattn-wmma-f16.cuh +3 -0
- package/cpp/llama.cpp/ggml/src/ggml-cuda/fattn.cu +345 -0
- package/cpp/llama.cpp/ggml/src/ggml-cuda/fattn.cuh +3 -0
- package/cpp/llama.cpp/ggml/src/ggml-cuda/getrows.cu +275 -0
- package/cpp/llama.cpp/ggml/src/ggml-cuda/getrows.cuh +15 -0
- package/cpp/llama.cpp/ggml/src/ggml-cuda/ggml-cuda.cu +3501 -0
- package/cpp/llama.cpp/ggml/src/ggml-cuda/gla.cu +93 -0
- package/cpp/llama.cpp/ggml/src/ggml-cuda/gla.cuh +3 -0
- package/cpp/llama.cpp/ggml/src/ggml-cuda/im2col.cu +103 -0
- package/cpp/llama.cpp/ggml/src/ggml-cuda/im2col.cuh +5 -0
- package/cpp/llama.cpp/ggml/src/ggml-cuda/mma.cuh +396 -0
- package/cpp/llama.cpp/ggml/src/ggml-cuda/mmq.cu +322 -0
- package/cpp/llama.cpp/ggml/src/ggml-cuda/mmq.cuh +3217 -0
- package/cpp/llama.cpp/ggml/src/ggml-cuda/mmv.cu +336 -0
- package/cpp/llama.cpp/ggml/src/ggml-cuda/mmv.cuh +12 -0
- package/cpp/llama.cpp/ggml/src/ggml-cuda/mmvq.cu +595 -0
- package/cpp/llama.cpp/ggml/src/ggml-cuda/mmvq.cuh +12 -0
- package/cpp/llama.cpp/ggml/src/ggml-cuda/norm.cu +458 -0
- package/cpp/llama.cpp/ggml/src/ggml-cuda/norm.cuh +11 -0
- package/cpp/llama.cpp/ggml/src/ggml-cuda/opt-step-adamw.cu +78 -0
- package/cpp/llama.cpp/ggml/src/ggml-cuda/opt-step-adamw.cuh +5 -0
- package/cpp/llama.cpp/ggml/src/ggml-cuda/out-prod.cu +68 -0
- package/cpp/llama.cpp/ggml/src/ggml-cuda/out-prod.cuh +3 -0
- package/cpp/llama.cpp/ggml/src/ggml-cuda/pad.cu +49 -0
- package/cpp/llama.cpp/ggml/src/ggml-cuda/pad.cuh +5 -0
- package/cpp/llama.cpp/ggml/src/ggml-cuda/pool2d.cu +94 -0
- package/cpp/llama.cpp/ggml/src/ggml-cuda/pool2d.cuh +5 -0
- package/cpp/llama.cpp/ggml/src/ggml-cuda/quantize.cu +189 -0
- package/cpp/llama.cpp/ggml/src/ggml-cuda/quantize.cuh +27 -0
- package/cpp/llama.cpp/ggml/src/ggml-cuda/rope.cu +456 -0
- package/cpp/llama.cpp/ggml/src/ggml-cuda/rope.cuh +7 -0
- package/cpp/llama.cpp/ggml/src/ggml-cuda/scale.cu +31 -0
- package/cpp/llama.cpp/ggml/src/ggml-cuda/scale.cuh +5 -0
- package/cpp/llama.cpp/ggml/src/ggml-cuda/softmax.cu +283 -0
- package/cpp/llama.cpp/ggml/src/ggml-cuda/softmax.cuh +7 -0
- package/cpp/llama.cpp/ggml/src/ggml-cuda/ssm-conv.cu +148 -0
- package/cpp/llama.cpp/ggml/src/ggml-cuda/ssm-conv.cuh +3 -0
- package/cpp/llama.cpp/ggml/src/ggml-cuda/ssm-scan.cu +153 -0
- package/cpp/llama.cpp/ggml/src/ggml-cuda/ssm-scan.cuh +3 -0
- package/cpp/llama.cpp/ggml/src/ggml-cuda/sum.cu +45 -0
- package/cpp/llama.cpp/ggml/src/ggml-cuda/sum.cuh +5 -0
- package/cpp/llama.cpp/ggml/src/ggml-cuda/sumrows.cu +39 -0
- package/cpp/llama.cpp/ggml/src/ggml-cuda/sumrows.cuh +5 -0
- package/cpp/llama.cpp/ggml/src/ggml-cuda/template-instances/fattn-mma-f16-instance-ncols1_1-ncols2_16.cu +5 -0
- package/cpp/llama.cpp/ggml/src/ggml-cuda/template-instances/fattn-mma-f16-instance-ncols1_1-ncols2_8.cu +10 -0
- package/cpp/llama.cpp/ggml/src/ggml-cuda/template-instances/fattn-mma-f16-instance-ncols1_16-ncols2_1.cu +10 -0
- package/cpp/llama.cpp/ggml/src/ggml-cuda/template-instances/fattn-mma-f16-instance-ncols1_16-ncols2_2.cu +10 -0
- package/cpp/llama.cpp/ggml/src/ggml-cuda/template-instances/fattn-mma-f16-instance-ncols1_16-ncols2_4.cu +10 -0
- package/cpp/llama.cpp/ggml/src/ggml-cuda/template-instances/fattn-mma-f16-instance-ncols1_2-ncols2_16.cu +5 -0
- package/cpp/llama.cpp/ggml/src/ggml-cuda/template-instances/fattn-mma-f16-instance-ncols1_2-ncols2_4.cu +10 -0
- package/cpp/llama.cpp/ggml/src/ggml-cuda/template-instances/fattn-mma-f16-instance-ncols1_2-ncols2_8.cu +10 -0
- package/cpp/llama.cpp/ggml/src/ggml-cuda/template-instances/fattn-mma-f16-instance-ncols1_32-ncols2_1.cu +10 -0
- package/cpp/llama.cpp/ggml/src/ggml-cuda/template-instances/fattn-mma-f16-instance-ncols1_32-ncols2_2.cu +10 -0
- package/cpp/llama.cpp/ggml/src/ggml-cuda/template-instances/fattn-mma-f16-instance-ncols1_4-ncols2_16.cu +5 -0
- package/cpp/llama.cpp/ggml/src/ggml-cuda/template-instances/fattn-mma-f16-instance-ncols1_4-ncols2_2.cu +10 -0
- package/cpp/llama.cpp/ggml/src/ggml-cuda/template-instances/fattn-mma-f16-instance-ncols1_4-ncols2_4.cu +10 -0
- package/cpp/llama.cpp/ggml/src/ggml-cuda/template-instances/fattn-mma-f16-instance-ncols1_4-ncols2_8.cu +10 -0
- package/cpp/llama.cpp/ggml/src/ggml-cuda/template-instances/fattn-mma-f16-instance-ncols1_64-ncols2_1.cu +10 -0
- package/cpp/llama.cpp/ggml/src/ggml-cuda/template-instances/fattn-mma-f16-instance-ncols1_8-ncols2_1.cu +10 -0
- package/cpp/llama.cpp/ggml/src/ggml-cuda/template-instances/fattn-mma-f16-instance-ncols1_8-ncols2_2.cu +10 -0
- package/cpp/llama.cpp/ggml/src/ggml-cuda/template-instances/fattn-mma-f16-instance-ncols1_8-ncols2_4.cu +10 -0
- package/cpp/llama.cpp/ggml/src/ggml-cuda/template-instances/fattn-mma-f16-instance-ncols1_8-ncols2_8.cu +10 -0
- package/cpp/llama.cpp/ggml/src/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-f16-f16.cu +5 -0
- package/cpp/llama.cpp/ggml/src/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-f16-q4_0.cu +5 -0
- package/cpp/llama.cpp/ggml/src/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-f16-q4_1.cu +5 -0
- package/cpp/llama.cpp/ggml/src/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-f16-q5_0.cu +5 -0
- package/cpp/llama.cpp/ggml/src/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-f16-q5_1.cu +5 -0
- package/cpp/llama.cpp/ggml/src/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-f16-q8_0.cu +5 -0
- package/cpp/llama.cpp/ggml/src/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q4_0-f16.cu +5 -0
- package/cpp/llama.cpp/ggml/src/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q4_0-q4_0.cu +5 -0
- package/cpp/llama.cpp/ggml/src/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q4_0-q4_1.cu +5 -0
- package/cpp/llama.cpp/ggml/src/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q4_0-q5_0.cu +5 -0
- package/cpp/llama.cpp/ggml/src/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q4_0-q5_1.cu +5 -0
- package/cpp/llama.cpp/ggml/src/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q4_0-q8_0.cu +5 -0
- package/cpp/llama.cpp/ggml/src/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q4_1-f16.cu +5 -0
- package/cpp/llama.cpp/ggml/src/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q4_1-q4_0.cu +5 -0
- package/cpp/llama.cpp/ggml/src/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q4_1-q4_1.cu +5 -0
- package/cpp/llama.cpp/ggml/src/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q4_1-q5_0.cu +5 -0
- package/cpp/llama.cpp/ggml/src/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q4_1-q5_1.cu +5 -0
- package/cpp/llama.cpp/ggml/src/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q4_1-q8_0.cu +5 -0
- package/cpp/llama.cpp/ggml/src/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q5_0-f16.cu +5 -0
- package/cpp/llama.cpp/ggml/src/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q5_0-q4_0.cu +5 -0
- package/cpp/llama.cpp/ggml/src/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q5_0-q4_1.cu +5 -0
- package/cpp/llama.cpp/ggml/src/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q5_0-q5_0.cu +5 -0
- package/cpp/llama.cpp/ggml/src/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q5_0-q5_1.cu +5 -0
- package/cpp/llama.cpp/ggml/src/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q5_0-q8_0.cu +5 -0
- package/cpp/llama.cpp/ggml/src/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q5_1-f16.cu +5 -0
- package/cpp/llama.cpp/ggml/src/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q5_1-q4_0.cu +5 -0
- package/cpp/llama.cpp/ggml/src/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q5_1-q4_1.cu +5 -0
- package/cpp/llama.cpp/ggml/src/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q5_1-q5_0.cu +5 -0
- package/cpp/llama.cpp/ggml/src/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q5_1-q5_1.cu +5 -0
- package/cpp/llama.cpp/ggml/src/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q5_1-q8_0.cu +5 -0
- package/cpp/llama.cpp/ggml/src/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q8_0-f16.cu +5 -0
- package/cpp/llama.cpp/ggml/src/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q8_0-q4_0.cu +5 -0
- package/cpp/llama.cpp/ggml/src/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q8_0-q4_1.cu +5 -0
- package/cpp/llama.cpp/ggml/src/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q8_0-q5_0.cu +5 -0
- package/cpp/llama.cpp/ggml/src/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q8_0-q5_1.cu +5 -0
- package/cpp/llama.cpp/ggml/src/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q8_0-q8_0.cu +5 -0
- package/cpp/llama.cpp/ggml/src/ggml-cuda/template-instances/fattn-vec-f16-instance-hs256-f16-f16.cu +5 -0
- package/cpp/llama.cpp/ggml/src/ggml-cuda/template-instances/fattn-vec-f16-instance-hs64-f16-f16.cu +5 -0
- package/cpp/llama.cpp/ggml/src/ggml-cuda/template-instances/fattn-vec-f16-instance-hs64-f16-q4_0.cu +5 -0
- package/cpp/llama.cpp/ggml/src/ggml-cuda/template-instances/fattn-vec-f16-instance-hs64-f16-q4_1.cu +5 -0
- package/cpp/llama.cpp/ggml/src/ggml-cuda/template-instances/fattn-vec-f16-instance-hs64-f16-q5_0.cu +5 -0
- package/cpp/llama.cpp/ggml/src/ggml-cuda/template-instances/fattn-vec-f16-instance-hs64-f16-q5_1.cu +5 -0
- package/cpp/llama.cpp/ggml/src/ggml-cuda/template-instances/fattn-vec-f16-instance-hs64-f16-q8_0.cu +5 -0
- package/cpp/llama.cpp/ggml/src/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-f16-f16.cu +5 -0
- package/cpp/llama.cpp/ggml/src/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-f16-q4_0.cu +5 -0
- package/cpp/llama.cpp/ggml/src/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-f16-q4_1.cu +5 -0
- package/cpp/llama.cpp/ggml/src/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-f16-q5_0.cu +5 -0
- package/cpp/llama.cpp/ggml/src/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-f16-q5_1.cu +5 -0
- package/cpp/llama.cpp/ggml/src/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-f16-q8_0.cu +5 -0
- package/cpp/llama.cpp/ggml/src/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q4_0-f16.cu +5 -0
- package/cpp/llama.cpp/ggml/src/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q4_0-q4_0.cu +5 -0
- package/cpp/llama.cpp/ggml/src/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q4_0-q4_1.cu +5 -0
- package/cpp/llama.cpp/ggml/src/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q4_0-q5_0.cu +5 -0
- package/cpp/llama.cpp/ggml/src/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q4_0-q5_1.cu +5 -0
- package/cpp/llama.cpp/ggml/src/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q4_0-q8_0.cu +5 -0
- package/cpp/llama.cpp/ggml/src/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q4_1-f16.cu +5 -0
- package/cpp/llama.cpp/ggml/src/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q4_1-q4_0.cu +5 -0
- package/cpp/llama.cpp/ggml/src/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q4_1-q4_1.cu +5 -0
- package/cpp/llama.cpp/ggml/src/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q4_1-q5_0.cu +5 -0
- package/cpp/llama.cpp/ggml/src/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q4_1-q5_1.cu +5 -0
- package/cpp/llama.cpp/ggml/src/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q4_1-q8_0.cu +5 -0
- package/cpp/llama.cpp/ggml/src/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q5_0-f16.cu +5 -0
- package/cpp/llama.cpp/ggml/src/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q5_0-q4_0.cu +5 -0
- package/cpp/llama.cpp/ggml/src/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q5_0-q4_1.cu +5 -0
- package/cpp/llama.cpp/ggml/src/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q5_0-q5_0.cu +5 -0
- package/cpp/llama.cpp/ggml/src/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q5_0-q5_1.cu +5 -0
- package/cpp/llama.cpp/ggml/src/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q5_0-q8_0.cu +5 -0
- package/cpp/llama.cpp/ggml/src/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q5_1-f16.cu +5 -0
- package/cpp/llama.cpp/ggml/src/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q5_1-q4_0.cu +5 -0
- package/cpp/llama.cpp/ggml/src/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q5_1-q4_1.cu +5 -0
- package/cpp/llama.cpp/ggml/src/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q5_1-q5_0.cu +5 -0
- package/cpp/llama.cpp/ggml/src/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q5_1-q5_1.cu +5 -0
- package/cpp/llama.cpp/ggml/src/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q5_1-q8_0.cu +5 -0
- package/cpp/llama.cpp/ggml/src/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q8_0-f16.cu +5 -0
- package/cpp/llama.cpp/ggml/src/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q8_0-q4_0.cu +5 -0
- package/cpp/llama.cpp/ggml/src/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q8_0-q4_1.cu +5 -0
- package/cpp/llama.cpp/ggml/src/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q8_0-q5_0.cu +5 -0
- package/cpp/llama.cpp/ggml/src/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q8_0-q5_1.cu +5 -0
- package/cpp/llama.cpp/ggml/src/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q8_0-q8_0.cu +5 -0
- package/cpp/llama.cpp/ggml/src/ggml-cuda/template-instances/fattn-vec-f32-instance-hs256-f16-f16.cu +5 -0
- package/cpp/llama.cpp/ggml/src/ggml-cuda/template-instances/fattn-vec-f32-instance-hs64-f16-f16.cu +5 -0
- package/cpp/llama.cpp/ggml/src/ggml-cuda/template-instances/fattn-vec-f32-instance-hs64-f16-q4_0.cu +5 -0
- package/cpp/llama.cpp/ggml/src/ggml-cuda/template-instances/fattn-vec-f32-instance-hs64-f16-q4_1.cu +5 -0
- package/cpp/llama.cpp/ggml/src/ggml-cuda/template-instances/fattn-vec-f32-instance-hs64-f16-q5_0.cu +5 -0
- package/cpp/llama.cpp/ggml/src/ggml-cuda/template-instances/fattn-vec-f32-instance-hs64-f16-q5_1.cu +5 -0
- package/cpp/llama.cpp/ggml/src/ggml-cuda/template-instances/fattn-vec-f32-instance-hs64-f16-q8_0.cu +5 -0
- package/cpp/llama.cpp/ggml/src/ggml-cuda/template-instances/generate_cu_files.py +78 -0
- package/cpp/llama.cpp/ggml/src/ggml-cuda/template-instances/mmq-instance-iq1_s.cu +5 -0
- package/cpp/llama.cpp/ggml/src/ggml-cuda/template-instances/mmq-instance-iq2_s.cu +5 -0
- package/cpp/llama.cpp/ggml/src/ggml-cuda/template-instances/mmq-instance-iq2_xs.cu +5 -0
- package/cpp/llama.cpp/ggml/src/ggml-cuda/template-instances/mmq-instance-iq2_xxs.cu +5 -0
- package/cpp/llama.cpp/ggml/src/ggml-cuda/template-instances/mmq-instance-iq3_s.cu +5 -0
- package/cpp/llama.cpp/ggml/src/ggml-cuda/template-instances/mmq-instance-iq3_xxs.cu +5 -0
- package/cpp/llama.cpp/ggml/src/ggml-cuda/template-instances/mmq-instance-iq4_nl.cu +5 -0
- package/cpp/llama.cpp/ggml/src/ggml-cuda/template-instances/mmq-instance-iq4_xs.cu +5 -0
- package/cpp/llama.cpp/ggml/src/ggml-cuda/template-instances/mmq-instance-q2_k.cu +5 -0
- package/cpp/llama.cpp/ggml/src/ggml-cuda/template-instances/mmq-instance-q3_k.cu +5 -0
- package/cpp/llama.cpp/ggml/src/ggml-cuda/template-instances/mmq-instance-q4_0.cu +5 -0
- package/cpp/llama.cpp/ggml/src/ggml-cuda/template-instances/mmq-instance-q4_1.cu +5 -0
- package/cpp/llama.cpp/ggml/src/ggml-cuda/template-instances/mmq-instance-q4_k.cu +5 -0
- package/cpp/llama.cpp/ggml/src/ggml-cuda/template-instances/mmq-instance-q5_0.cu +5 -0
- package/cpp/llama.cpp/ggml/src/ggml-cuda/template-instances/mmq-instance-q5_1.cu +5 -0
- package/cpp/llama.cpp/ggml/src/ggml-cuda/template-instances/mmq-instance-q5_k.cu +5 -0
- package/cpp/llama.cpp/ggml/src/ggml-cuda/template-instances/mmq-instance-q6_k.cu +5 -0
- package/cpp/llama.cpp/ggml/src/ggml-cuda/template-instances/mmq-instance-q8_0.cu +5 -0
- package/cpp/llama.cpp/ggml/src/ggml-cuda/tsembd.cu +47 -0
- package/cpp/llama.cpp/ggml/src/ggml-cuda/tsembd.cuh +5 -0
- package/cpp/llama.cpp/ggml/src/ggml-cuda/unary.cu +279 -0
- package/cpp/llama.cpp/ggml/src/ggml-cuda/unary.cuh +57 -0
- package/cpp/llama.cpp/ggml/src/ggml-cuda/upscale.cu +51 -0
- package/cpp/llama.cpp/ggml/src/ggml-cuda/upscale.cuh +5 -0
- package/cpp/llama.cpp/ggml/src/ggml-cuda/vecdotq.cuh +1135 -0
- package/cpp/llama.cpp/ggml/src/ggml-cuda/vendors/cuda.h +15 -0
- package/cpp/llama.cpp/ggml/src/ggml-cuda/vendors/hip.h +243 -0
- package/cpp/llama.cpp/ggml/src/ggml-cuda/vendors/musa.h +140 -0
- package/cpp/llama.cpp/ggml/src/ggml-cuda/wkv.cu +199 -0
- package/cpp/llama.cpp/ggml/src/ggml-cuda/wkv.cuh +7 -0
- package/cpp/llama.cpp/ggml/src/ggml-hip/CMakeLists.txt +131 -0
- package/cpp/llama.cpp/ggml/src/ggml-impl.h +601 -0
- package/cpp/llama.cpp/ggml/src/ggml-kompute/CMakeLists.txt +166 -0
- package/cpp/llama.cpp/ggml/src/ggml-kompute/ggml-kompute.cpp +2251 -0
- package/cpp/llama.cpp/ggml/src/ggml-kompute/kompute-shaders/common.comp +112 -0
- package/cpp/llama.cpp/ggml/src/ggml-kompute/kompute-shaders/op_add.comp +58 -0
- package/cpp/llama.cpp/ggml/src/ggml-kompute/kompute-shaders/op_addrow.comp +25 -0
- package/cpp/llama.cpp/ggml/src/ggml-kompute/kompute-shaders/op_cpy_f16_f16.comp +52 -0
- package/cpp/llama.cpp/ggml/src/ggml-kompute/kompute-shaders/op_cpy_f16_f32.comp +52 -0
- package/cpp/llama.cpp/ggml/src/ggml-kompute/kompute-shaders/op_cpy_f32_f16.comp +52 -0
- package/cpp/llama.cpp/ggml/src/ggml-kompute/kompute-shaders/op_cpy_f32_f32.comp +52 -0
- package/cpp/llama.cpp/ggml/src/ggml-kompute/kompute-shaders/op_diagmask.comp +30 -0
- package/cpp/llama.cpp/ggml/src/ggml-kompute/kompute-shaders/op_gelu.comp +22 -0
- package/cpp/llama.cpp/ggml/src/ggml-kompute/kompute-shaders/op_getrows.comp +17 -0
- package/cpp/llama.cpp/ggml/src/ggml-kompute/kompute-shaders/op_getrows_f16.comp +31 -0
- package/cpp/llama.cpp/ggml/src/ggml-kompute/kompute-shaders/op_getrows_f32.comp +31 -0
- package/cpp/llama.cpp/ggml/src/ggml-kompute/kompute-shaders/op_getrows_q4_0.comp +38 -0
- package/cpp/llama.cpp/ggml/src/ggml-kompute/kompute-shaders/op_getrows_q4_1.comp +39 -0
- package/cpp/llama.cpp/ggml/src/ggml-kompute/kompute-shaders/op_getrows_q6_k.comp +44 -0
- package/cpp/llama.cpp/ggml/src/ggml-kompute/kompute-shaders/op_mul.comp +52 -0
- package/cpp/llama.cpp/ggml/src/ggml-kompute/kompute-shaders/op_mul_mat_f16.comp +69 -0
- package/cpp/llama.cpp/ggml/src/ggml-kompute/kompute-shaders/op_mul_mat_mat_f32.comp +51 -0
- package/cpp/llama.cpp/ggml/src/ggml-kompute/kompute-shaders/op_mul_mat_q4_0.comp +33 -0
- package/cpp/llama.cpp/ggml/src/ggml-kompute/kompute-shaders/op_mul_mat_q4_1.comp +35 -0
- package/cpp/llama.cpp/ggml/src/ggml-kompute/kompute-shaders/op_mul_mat_q4_k.comp +140 -0
- package/cpp/llama.cpp/ggml/src/ggml-kompute/kompute-shaders/op_mul_mat_q6_k.comp +106 -0
- package/cpp/llama.cpp/ggml/src/ggml-kompute/kompute-shaders/op_mul_mat_q8_0.comp +73 -0
- package/cpp/llama.cpp/ggml/src/ggml-kompute/kompute-shaders/op_mul_mv_q_n.comp +52 -0
- package/cpp/llama.cpp/ggml/src/ggml-kompute/kompute-shaders/op_mul_mv_q_n_pre.comp +28 -0
- package/cpp/llama.cpp/ggml/src/ggml-kompute/kompute-shaders/op_norm.comp +84 -0
- package/cpp/llama.cpp/ggml/src/ggml-kompute/kompute-shaders/op_relu.comp +21 -0
- package/cpp/llama.cpp/ggml/src/ggml-kompute/kompute-shaders/op_rmsnorm.comp +53 -0
- package/cpp/llama.cpp/ggml/src/ggml-kompute/kompute-shaders/op_rope_neox_f16.comp +52 -0
- package/cpp/llama.cpp/ggml/src/ggml-kompute/kompute-shaders/op_rope_neox_f32.comp +52 -0
- package/cpp/llama.cpp/ggml/src/ggml-kompute/kompute-shaders/op_rope_norm_f16.comp +52 -0
- package/cpp/llama.cpp/ggml/src/ggml-kompute/kompute-shaders/op_rope_norm_f32.comp +52 -0
- package/cpp/llama.cpp/ggml/src/ggml-kompute/kompute-shaders/op_scale.comp +19 -0
- package/cpp/llama.cpp/ggml/src/ggml-kompute/kompute-shaders/op_scale_8.comp +23 -0
- package/cpp/llama.cpp/ggml/src/ggml-kompute/kompute-shaders/op_silu.comp +22 -0
- package/cpp/llama.cpp/ggml/src/ggml-kompute/kompute-shaders/op_softmax.comp +72 -0
- package/cpp/llama.cpp/ggml/src/ggml-kompute/kompute-shaders/rope_common.comp +71 -0
- package/cpp/llama.cpp/ggml/src/ggml-metal/CMakeLists.txt +120 -0
- package/cpp/llama.cpp/ggml/src/ggml-metal/ggml-metal-impl.h +618 -0
- package/cpp/llama.cpp/ggml/src/ggml-metal/ggml-metal.m +5916 -0
- package/cpp/llama.cpp/ggml/src/ggml-metal/ggml-metal.metal +6891 -0
- package/cpp/llama.cpp/ggml/src/ggml-musa/CMakeLists.txt +107 -0
- package/cpp/llama.cpp/ggml/src/ggml-opencl/CMakeLists.txt +96 -0
- package/cpp/llama.cpp/ggml/src/ggml-opencl/ggml-opencl.cpp +4966 -0
- package/cpp/llama.cpp/ggml/src/ggml-opencl/kernels/add.cl +83 -0
- package/cpp/llama.cpp/ggml/src/ggml-opencl/kernels/clamp.cl +20 -0
- package/cpp/llama.cpp/ggml/src/ggml-opencl/kernels/cpy.cl +184 -0
- package/cpp/llama.cpp/ggml/src/ggml-opencl/kernels/cvt.cl +118 -0
- package/cpp/llama.cpp/ggml/src/ggml-opencl/kernels/diag_mask_inf.cl +58 -0
- package/cpp/llama.cpp/ggml/src/ggml-opencl/kernels/embed_kernel.py +26 -0
- package/cpp/llama.cpp/ggml/src/ggml-opencl/kernels/gelu.cl +62 -0
- package/cpp/llama.cpp/ggml/src/ggml-opencl/kernels/gemv_noshuffle.cl +268 -0
- package/cpp/llama.cpp/ggml/src/ggml-opencl/kernels/gemv_noshuffle_general.cl +274 -0
- package/cpp/llama.cpp/ggml/src/ggml-opencl/kernels/get_rows.cl +163 -0
- package/cpp/llama.cpp/ggml/src/ggml-opencl/kernels/im2col_f16.cl +57 -0
- package/cpp/llama.cpp/ggml/src/ggml-opencl/kernels/im2col_f32.cl +57 -0
- package/cpp/llama.cpp/ggml/src/ggml-opencl/kernels/mul.cl +79 -0
- package/cpp/llama.cpp/ggml/src/ggml-opencl/kernels/mul_mat_Ab_Bi_8x4.cl +139 -0
- package/cpp/llama.cpp/ggml/src/ggml-opencl/kernels/mul_mv_f16_f16.cl +118 -0
- package/cpp/llama.cpp/ggml/src/ggml-opencl/kernels/mul_mv_f16_f32.cl +118 -0
- package/cpp/llama.cpp/ggml/src/ggml-opencl/kernels/mul_mv_f16_f32_1row.cl +94 -0
- package/cpp/llama.cpp/ggml/src/ggml-opencl/kernels/mul_mv_f16_f32_l4.cl +84 -0
- package/cpp/llama.cpp/ggml/src/ggml-opencl/kernels/mul_mv_f32_f32.cl +118 -0
- package/cpp/llama.cpp/ggml/src/ggml-opencl/kernels/mul_mv_q4_0_f32.cl +192 -0
- package/cpp/llama.cpp/ggml/src/ggml-opencl/kernels/mul_mv_q4_0_f32_1d_16x_flat.cl +307 -0
- package/cpp/llama.cpp/ggml/src/ggml-opencl/kernels/mul_mv_q4_0_f32_1d_8x_flat.cl +265 -0
- package/cpp/llama.cpp/ggml/src/ggml-opencl/kernels/mul_mv_q4_0_f32_8x_flat.cl +272 -0
- package/cpp/llama.cpp/ggml/src/ggml-opencl/kernels/mul_mv_q4_0_f32_v.cl +254 -0
- package/cpp/llama.cpp/ggml/src/ggml-opencl/kernels/mul_mv_q6_k.cl +190 -0
- package/cpp/llama.cpp/ggml/src/ggml-opencl/kernels/norm.cl +81 -0
- package/cpp/llama.cpp/ggml/src/ggml-opencl/kernels/relu.cl +16 -0
- package/cpp/llama.cpp/ggml/src/ggml-opencl/kernels/rms_norm.cl +96 -0
- package/cpp/llama.cpp/ggml/src/ggml-opencl/kernels/rope.cl +721 -0
- package/cpp/llama.cpp/ggml/src/ggml-opencl/kernels/scale.cl +16 -0
- package/cpp/llama.cpp/ggml/src/ggml-opencl/kernels/silu.cl +30 -0
- package/cpp/llama.cpp/ggml/src/ggml-opencl/kernels/softmax_4_f16.cl +87 -0
- package/cpp/llama.cpp/ggml/src/ggml-opencl/kernels/softmax_4_f32.cl +87 -0
- package/cpp/llama.cpp/ggml/src/ggml-opencl/kernels/softmax_f16.cl +86 -0
- package/cpp/llama.cpp/ggml/src/ggml-opencl/kernels/softmax_f32.cl +86 -0
- package/cpp/llama.cpp/ggml/src/ggml-opencl/kernels/transpose.cl +84 -0
- package/cpp/llama.cpp/ggml/src/ggml-opt.cpp +854 -0
- package/cpp/llama.cpp/ggml/src/ggml-quants.c +5232 -0
- package/cpp/llama.cpp/ggml/src/ggml-quants.h +100 -0
- package/cpp/llama.cpp/ggml/src/ggml-rpc/CMakeLists.txt +9 -0
- package/cpp/llama.cpp/ggml/src/ggml-rpc/ggml-rpc.cpp +1813 -0
- package/cpp/llama.cpp/ggml/src/ggml-sycl/CMakeLists.txt +183 -0
- package/cpp/llama.cpp/ggml/src/ggml-sycl/backend.hpp +37 -0
- package/cpp/llama.cpp/ggml/src/ggml-sycl/binbcast.cpp +350 -0
- package/cpp/llama.cpp/ggml/src/ggml-sycl/binbcast.hpp +39 -0
- package/cpp/llama.cpp/ggml/src/ggml-sycl/common.cpp +83 -0
- package/cpp/llama.cpp/ggml/src/ggml-sycl/common.hpp +493 -0
- package/cpp/llama.cpp/ggml/src/ggml-sycl/concat.cpp +197 -0
- package/cpp/llama.cpp/ggml/src/ggml-sycl/concat.hpp +20 -0
- package/cpp/llama.cpp/ggml/src/ggml-sycl/conv.cpp +100 -0
- package/cpp/llama.cpp/ggml/src/ggml-sycl/conv.hpp +20 -0
- package/cpp/llama.cpp/ggml/src/ggml-sycl/convert.cpp +596 -0
- package/cpp/llama.cpp/ggml/src/ggml-sycl/convert.hpp +34 -0
- package/cpp/llama.cpp/ggml/src/ggml-sycl/cpy.cpp +701 -0
- package/cpp/llama.cpp/ggml/src/ggml-sycl/cpy.hpp +11 -0
- package/cpp/llama.cpp/ggml/src/ggml-sycl/dequantize.hpp +753 -0
- package/cpp/llama.cpp/ggml/src/ggml-sycl/dmmv.cpp +1154 -0
- package/cpp/llama.cpp/ggml/src/ggml-sycl/dmmv.hpp +27 -0
- package/cpp/llama.cpp/ggml/src/ggml-sycl/dpct/helper.hpp +2957 -0
- package/cpp/llama.cpp/ggml/src/ggml-sycl/element_wise.cpp +1559 -0
- package/cpp/llama.cpp/ggml/src/ggml-sycl/element_wise.hpp +75 -0
- package/cpp/llama.cpp/ggml/src/ggml-sycl/gemm.hpp +70 -0
- package/cpp/llama.cpp/ggml/src/ggml-sycl/getrows.cpp +311 -0
- package/cpp/llama.cpp/ggml/src/ggml-sycl/getrows.hpp +20 -0
- package/cpp/llama.cpp/ggml/src/ggml-sycl/ggml-sycl.cpp +4302 -0
- package/cpp/llama.cpp/ggml/src/ggml-sycl/gla.cpp +105 -0
- package/cpp/llama.cpp/ggml/src/ggml-sycl/gla.hpp +8 -0
- package/cpp/llama.cpp/ggml/src/ggml-sycl/im2col.cpp +136 -0
- package/cpp/llama.cpp/ggml/src/ggml-sycl/im2col.hpp +21 -0
- package/cpp/llama.cpp/ggml/src/ggml-sycl/mmq.cpp +3030 -0
- package/cpp/llama.cpp/ggml/src/ggml-sycl/mmq.hpp +33 -0
- package/cpp/llama.cpp/ggml/src/ggml-sycl/mmvq.cpp +1081 -0
- package/cpp/llama.cpp/ggml/src/ggml-sycl/mmvq.hpp +27 -0
- package/cpp/llama.cpp/ggml/src/ggml-sycl/norm.cpp +474 -0
- package/cpp/llama.cpp/ggml/src/ggml-sycl/norm.hpp +26 -0
- package/cpp/llama.cpp/ggml/src/ggml-sycl/outprod.cpp +46 -0
- package/cpp/llama.cpp/ggml/src/ggml-sycl/outprod.hpp +10 -0
- package/cpp/llama.cpp/ggml/src/ggml-sycl/presets.hpp +74 -0
- package/cpp/llama.cpp/ggml/src/ggml-sycl/quants.hpp +61 -0
- package/cpp/llama.cpp/ggml/src/ggml-sycl/rope.cpp +362 -0
- package/cpp/llama.cpp/ggml/src/ggml-sycl/rope.hpp +20 -0
- package/cpp/llama.cpp/ggml/src/ggml-sycl/softmax.cpp +264 -0
- package/cpp/llama.cpp/ggml/src/ggml-sycl/softmax.hpp +20 -0
- package/cpp/llama.cpp/ggml/src/ggml-sycl/sycl_hw.cpp +13 -0
- package/cpp/llama.cpp/ggml/src/ggml-sycl/sycl_hw.hpp +23 -0
- package/cpp/llama.cpp/ggml/src/ggml-sycl/tsembd.cpp +73 -0
- package/cpp/llama.cpp/ggml/src/ggml-sycl/tsembd.hpp +20 -0
- package/cpp/llama.cpp/ggml/src/ggml-sycl/vecdotq.hpp +1189 -0
- package/cpp/llama.cpp/ggml/src/ggml-sycl/wkv.cpp +305 -0
- package/cpp/llama.cpp/ggml/src/ggml-sycl/wkv.hpp +10 -0
- package/cpp/llama.cpp/ggml/src/ggml-threading.cpp +12 -0
- package/cpp/llama.cpp/ggml/src/ggml-threading.h +14 -0
- package/cpp/llama.cpp/ggml/src/ggml-vulkan/CMakeLists.txt +202 -0
- package/cpp/llama.cpp/ggml/src/ggml-vulkan/cmake/host-toolchain.cmake.in +15 -0
- package/cpp/llama.cpp/ggml/src/ggml-vulkan/ggml-vulkan.cpp +10502 -0
- package/cpp/llama.cpp/ggml/src/ggml-vulkan/vulkan-shaders/CMakeLists.txt +22 -0
- package/cpp/llama.cpp/ggml/src/ggml-vulkan/vulkan-shaders/acc.comp +29 -0
- package/cpp/llama.cpp/ggml/src/ggml-vulkan/vulkan-shaders/add.comp +29 -0
- package/cpp/llama.cpp/ggml/src/ggml-vulkan/vulkan-shaders/argmax.comp +51 -0
- package/cpp/llama.cpp/ggml/src/ggml-vulkan/vulkan-shaders/argsort.comp +69 -0
- package/cpp/llama.cpp/ggml/src/ggml-vulkan/vulkan-shaders/clamp.comp +17 -0
- package/cpp/llama.cpp/ggml/src/ggml-vulkan/vulkan-shaders/concat.comp +41 -0
- package/cpp/llama.cpp/ggml/src/ggml-vulkan/vulkan-shaders/contig_copy.comp +49 -0
- package/cpp/llama.cpp/ggml/src/ggml-vulkan/vulkan-shaders/conv2d_dw.comp +105 -0
- package/cpp/llama.cpp/ggml/src/ggml-vulkan/vulkan-shaders/copy.comp +23 -0
- package/cpp/llama.cpp/ggml/src/ggml-vulkan/vulkan-shaders/copy_from_quant.comp +51 -0
- package/cpp/llama.cpp/ggml/src/ggml-vulkan/vulkan-shaders/copy_to_quant.comp +242 -0
- package/cpp/llama.cpp/ggml/src/ggml-vulkan/vulkan-shaders/cos.comp +17 -0
- package/cpp/llama.cpp/ggml/src/ggml-vulkan/vulkan-shaders/count_equal.comp +31 -0
- package/cpp/llama.cpp/ggml/src/ggml-vulkan/vulkan-shaders/dequant_f32.comp +20 -0
- package/cpp/llama.cpp/ggml/src/ggml-vulkan/vulkan-shaders/dequant_funcs.comp +462 -0
- package/cpp/llama.cpp/ggml/src/ggml-vulkan/vulkan-shaders/dequant_funcs_cm2.comp +699 -0
- package/cpp/llama.cpp/ggml/src/ggml-vulkan/vulkan-shaders/dequant_head.comp +13 -0
- package/cpp/llama.cpp/ggml/src/ggml-vulkan/vulkan-shaders/dequant_iq1_m.comp +42 -0
- package/cpp/llama.cpp/ggml/src/ggml-vulkan/vulkan-shaders/dequant_iq1_s.comp +35 -0
- package/cpp/llama.cpp/ggml/src/ggml-vulkan/vulkan-shaders/dequant_iq2_s.comp +44 -0
- package/cpp/llama.cpp/ggml/src/ggml-vulkan/vulkan-shaders/dequant_iq2_xs.comp +43 -0
- package/cpp/llama.cpp/ggml/src/ggml-vulkan/vulkan-shaders/dequant_iq2_xxs.comp +48 -0
- package/cpp/llama.cpp/ggml/src/ggml-vulkan/vulkan-shaders/dequant_iq3_s.comp +39 -0
- package/cpp/llama.cpp/ggml/src/ggml-vulkan/vulkan-shaders/dequant_iq3_xxs.comp +49 -0
- package/cpp/llama.cpp/ggml/src/ggml-vulkan/vulkan-shaders/dequant_iq4_nl.comp +32 -0
- package/cpp/llama.cpp/ggml/src/ggml-vulkan/vulkan-shaders/dequant_iq4_xs.comp +34 -0
- package/cpp/llama.cpp/ggml/src/ggml-vulkan/vulkan-shaders/dequant_q2_k.comp +34 -0
- package/cpp/llama.cpp/ggml/src/ggml-vulkan/vulkan-shaders/dequant_q3_k.comp +42 -0
- package/cpp/llama.cpp/ggml/src/ggml-vulkan/vulkan-shaders/dequant_q4_0.comp +30 -0
- package/cpp/llama.cpp/ggml/src/ggml-vulkan/vulkan-shaders/dequant_q4_1.comp +32 -0
- package/cpp/llama.cpp/ggml/src/ggml-vulkan/vulkan-shaders/dequant_q4_k.comp +68 -0
- package/cpp/llama.cpp/ggml/src/ggml-vulkan/vulkan-shaders/dequant_q5_0.comp +34 -0
- package/cpp/llama.cpp/ggml/src/ggml-vulkan/vulkan-shaders/dequant_q5_1.comp +35 -0
- package/cpp/llama.cpp/ggml/src/ggml-vulkan/vulkan-shaders/dequant_q5_k.comp +70 -0
- package/cpp/llama.cpp/ggml/src/ggml-vulkan/vulkan-shaders/dequant_q6_k.comp +33 -0
- package/cpp/llama.cpp/ggml/src/ggml-vulkan/vulkan-shaders/dequant_q8_0.comp +31 -0
- package/cpp/llama.cpp/ggml/src/ggml-vulkan/vulkan-shaders/diag_mask_inf.comp +34 -0
- package/cpp/llama.cpp/ggml/src/ggml-vulkan/vulkan-shaders/div.comp +27 -0
- package/cpp/llama.cpp/ggml/src/ggml-vulkan/vulkan-shaders/flash_attn.comp +483 -0
- package/cpp/llama.cpp/ggml/src/ggml-vulkan/vulkan-shaders/flash_attn_cm2.comp +383 -0
- package/cpp/llama.cpp/ggml/src/ggml-vulkan/vulkan-shaders/flash_attn_split_k_reduce.comp +59 -0
- package/cpp/llama.cpp/ggml/src/ggml-vulkan/vulkan-shaders/gelu.comp +25 -0
- package/cpp/llama.cpp/ggml/src/ggml-vulkan/vulkan-shaders/gelu_quick.comp +23 -0
- package/cpp/llama.cpp/ggml/src/ggml-vulkan/vulkan-shaders/generic_binary_head.comp +64 -0
- package/cpp/llama.cpp/ggml/src/ggml-vulkan/vulkan-shaders/generic_head.comp +9 -0
- package/cpp/llama.cpp/ggml/src/ggml-vulkan/vulkan-shaders/generic_unary_head.comp +76 -0
- package/cpp/llama.cpp/ggml/src/ggml-vulkan/vulkan-shaders/get_rows.comp +33 -0
- package/cpp/llama.cpp/ggml/src/ggml-vulkan/vulkan-shaders/get_rows_quant.comp +41 -0
- package/cpp/llama.cpp/ggml/src/ggml-vulkan/vulkan-shaders/group_norm.comp +66 -0
- package/cpp/llama.cpp/ggml/src/ggml-vulkan/vulkan-shaders/im2col.comp +100 -0
- package/cpp/llama.cpp/ggml/src/ggml-vulkan/vulkan-shaders/l2_norm.comp +41 -0
- package/cpp/llama.cpp/ggml/src/ggml-vulkan/vulkan-shaders/leaky_relu.comp +22 -0
- package/cpp/llama.cpp/ggml/src/ggml-vulkan/vulkan-shaders/mul.comp +27 -0
- package/cpp/llama.cpp/ggml/src/ggml-vulkan/vulkan-shaders/mul_mat_split_k_reduce.comp +48 -0
- package/cpp/llama.cpp/ggml/src/ggml-vulkan/vulkan-shaders/mul_mat_vec.comp +169 -0
- package/cpp/llama.cpp/ggml/src/ggml-vulkan/vulkan-shaders/mul_mat_vec_base.comp +118 -0
- package/cpp/llama.cpp/ggml/src/ggml-vulkan/vulkan-shaders/mul_mat_vec_iq1_m.comp +82 -0
- package/cpp/llama.cpp/ggml/src/ggml-vulkan/vulkan-shaders/mul_mat_vec_iq1_s.comp +79 -0
- package/cpp/llama.cpp/ggml/src/ggml-vulkan/vulkan-shaders/mul_mat_vec_iq2_s.comp +90 -0
- package/cpp/llama.cpp/ggml/src/ggml-vulkan/vulkan-shaders/mul_mat_vec_iq2_xs.comp +87 -0
- package/cpp/llama.cpp/ggml/src/ggml-vulkan/vulkan-shaders/mul_mat_vec_iq2_xxs.comp +87 -0
- package/cpp/llama.cpp/ggml/src/ggml-vulkan/vulkan-shaders/mul_mat_vec_iq3_s.comp +90 -0
- package/cpp/llama.cpp/ggml/src/ggml-vulkan/vulkan-shaders/mul_mat_vec_iq3_xxs.comp +88 -0
- package/cpp/llama.cpp/ggml/src/ggml-vulkan/vulkan-shaders/mul_mat_vec_nc.comp +118 -0
- package/cpp/llama.cpp/ggml/src/ggml-vulkan/vulkan-shaders/mul_mat_vec_p021.comp +154 -0
- package/cpp/llama.cpp/ggml/src/ggml-vulkan/vulkan-shaders/mul_mat_vec_q2_k.comp +130 -0
- package/cpp/llama.cpp/ggml/src/ggml-vulkan/vulkan-shaders/mul_mat_vec_q3_k.comp +132 -0
- package/cpp/llama.cpp/ggml/src/ggml-vulkan/vulkan-shaders/mul_mat_vec_q4_k.comp +136 -0
- package/cpp/llama.cpp/ggml/src/ggml-vulkan/vulkan-shaders/mul_mat_vec_q5_k.comp +167 -0
- package/cpp/llama.cpp/ggml/src/ggml-vulkan/vulkan-shaders/mul_mat_vec_q6_k.comp +130 -0
- package/cpp/llama.cpp/ggml/src/ggml-vulkan/vulkan-shaders/mul_mm.comp +868 -0
- package/cpp/llama.cpp/ggml/src/ggml-vulkan/vulkan-shaders/mul_mm_cm2.comp +441 -0
- package/cpp/llama.cpp/ggml/src/ggml-vulkan/vulkan-shaders/mul_mmq.comp +442 -0
- package/cpp/llama.cpp/ggml/src/ggml-vulkan/vulkan-shaders/mul_mmq_funcs.comp +99 -0
- package/cpp/llama.cpp/ggml/src/ggml-vulkan/vulkan-shaders/norm.comp +44 -0
- package/cpp/llama.cpp/ggml/src/ggml-vulkan/vulkan-shaders/opt_step_adamw.comp +42 -0
- package/cpp/llama.cpp/ggml/src/ggml-vulkan/vulkan-shaders/pad.comp +28 -0
- package/cpp/llama.cpp/ggml/src/ggml-vulkan/vulkan-shaders/pool2d.comp +74 -0
- package/cpp/llama.cpp/ggml/src/ggml-vulkan/vulkan-shaders/quantize_q8_1.comp +77 -0
- package/cpp/llama.cpp/ggml/src/ggml-vulkan/vulkan-shaders/relu.comp +21 -0
- package/cpp/llama.cpp/ggml/src/ggml-vulkan/vulkan-shaders/repeat.comp +26 -0
- package/cpp/llama.cpp/ggml/src/ggml-vulkan/vulkan-shaders/repeat_back.comp +37 -0
- package/cpp/llama.cpp/ggml/src/ggml-vulkan/vulkan-shaders/rms_norm.comp +52 -0
- package/cpp/llama.cpp/ggml/src/ggml-vulkan/vulkan-shaders/rms_norm_back.comp +55 -0
- package/cpp/llama.cpp/ggml/src/ggml-vulkan/vulkan-shaders/rope_head.comp +58 -0
- package/cpp/llama.cpp/ggml/src/ggml-vulkan/vulkan-shaders/rope_multi.comp +60 -0
- package/cpp/llama.cpp/ggml/src/ggml-vulkan/vulkan-shaders/rope_neox.comp +43 -0
- package/cpp/llama.cpp/ggml/src/ggml-vulkan/vulkan-shaders/rope_norm.comp +43 -0
- package/cpp/llama.cpp/ggml/src/ggml-vulkan/vulkan-shaders/rope_vision.comp +47 -0
- package/cpp/llama.cpp/ggml/src/ggml-vulkan/vulkan-shaders/scale.comp +24 -0
- package/cpp/llama.cpp/ggml/src/ggml-vulkan/vulkan-shaders/sigmoid.comp +20 -0
- package/cpp/llama.cpp/ggml/src/ggml-vulkan/vulkan-shaders/silu.comp +22 -0
- package/cpp/llama.cpp/ggml/src/ggml-vulkan/vulkan-shaders/silu_back.comp +26 -0
- package/cpp/llama.cpp/ggml/src/ggml-vulkan/vulkan-shaders/sin.comp +17 -0
- package/cpp/llama.cpp/ggml/src/ggml-vulkan/vulkan-shaders/soft_max.comp +173 -0
- package/cpp/llama.cpp/ggml/src/ggml-vulkan/vulkan-shaders/soft_max_back.comp +50 -0
- package/cpp/llama.cpp/ggml/src/ggml-vulkan/vulkan-shaders/square.comp +17 -0
- package/cpp/llama.cpp/ggml/src/ggml-vulkan/vulkan-shaders/sub.comp +29 -0
- package/cpp/llama.cpp/ggml/src/ggml-vulkan/vulkan-shaders/sum_rows.comp +37 -0
- package/cpp/llama.cpp/ggml/src/ggml-vulkan/vulkan-shaders/tanh.comp +20 -0
- package/cpp/llama.cpp/ggml/src/ggml-vulkan/vulkan-shaders/test_bfloat16_support.comp +7 -0
- package/cpp/llama.cpp/ggml/src/ggml-vulkan/vulkan-shaders/test_coopmat2_support.comp +7 -0
- package/cpp/llama.cpp/ggml/src/ggml-vulkan/vulkan-shaders/test_coopmat_support.comp +7 -0
- package/cpp/llama.cpp/ggml/src/ggml-vulkan/vulkan-shaders/test_integer_dot_support.comp +7 -0
- package/cpp/llama.cpp/ggml/src/ggml-vulkan/vulkan-shaders/timestep_embedding.comp +41 -0
- package/cpp/llama.cpp/ggml/src/ggml-vulkan/vulkan-shaders/types.comp +1373 -0
- package/cpp/llama.cpp/ggml/src/ggml-vulkan/vulkan-shaders/upscale.comp +36 -0
- package/cpp/llama.cpp/ggml/src/ggml-vulkan/vulkan-shaders/vulkan-shaders-gen.cpp +740 -0
- package/cpp/llama.cpp/ggml/src/ggml-vulkan/vulkan-shaders/wkv6.comp +87 -0
- package/cpp/llama.cpp/ggml/src/ggml-vulkan/vulkan-shaders/wkv7.comp +91 -0
- package/cpp/llama.cpp/ggml/src/ggml.c +6499 -0
- package/cpp/llama.cpp/ggml/src/gguf.cpp +1330 -0
- package/cpp/llama.cpp/gguf-py/LICENSE +21 -0
- package/cpp/llama.cpp/gguf-py/README.md +99 -0
- package/cpp/llama.cpp/gguf-py/examples/reader.py +49 -0
- package/cpp/llama.cpp/gguf-py/examples/writer.py +39 -0
- package/cpp/llama.cpp/gguf-py/gguf/__init__.py +9 -0
- package/cpp/llama.cpp/gguf-py/gguf/constants.py +2296 -0
- package/cpp/llama.cpp/gguf-py/gguf/gguf.py +15 -0
- package/cpp/llama.cpp/gguf-py/gguf/gguf_reader.py +367 -0
- package/cpp/llama.cpp/gguf-py/gguf/gguf_writer.py +1041 -0
- package/cpp/llama.cpp/gguf-py/gguf/lazy.py +223 -0
- package/cpp/llama.cpp/gguf-py/gguf/metadata.py +642 -0
- package/cpp/llama.cpp/gguf-py/gguf/py.typed +0 -0
- package/cpp/llama.cpp/gguf-py/gguf/quants.py +1269 -0
- package/cpp/llama.cpp/gguf-py/gguf/scripts/gguf_convert_endian.py +182 -0
- package/cpp/llama.cpp/gguf-py/gguf/scripts/gguf_dump.py +454 -0
- package/cpp/llama.cpp/gguf-py/gguf/scripts/gguf_editor_gui.py +1610 -0
- package/cpp/llama.cpp/gguf-py/gguf/scripts/gguf_hash.py +102 -0
- package/cpp/llama.cpp/gguf-py/gguf/scripts/gguf_new_metadata.py +207 -0
- package/cpp/llama.cpp/gguf-py/gguf/scripts/gguf_set_metadata.py +95 -0
- package/cpp/llama.cpp/gguf-py/gguf/tensor_mapping.py +1172 -0
- package/cpp/llama.cpp/gguf-py/gguf/utility.py +264 -0
- package/cpp/llama.cpp/gguf-py/gguf/vocab.py +492 -0
- package/cpp/llama.cpp/gguf-py/pyproject.toml +43 -0
- package/cpp/llama.cpp/gguf-py/tests/__init__.py +1 -0
- package/cpp/llama.cpp/gguf-py/tests/test_metadata.py +238 -0
- package/cpp/llama.cpp/gguf-py/tests/test_quants.py +238 -0
- package/cpp/llama.cpp/grammars/README.md +382 -0
- package/cpp/llama.cpp/grammars/arithmetic.gbnf +6 -0
- package/cpp/llama.cpp/grammars/c.gbnf +42 -0
- package/cpp/llama.cpp/grammars/chess.gbnf +13 -0
- package/cpp/llama.cpp/grammars/english.gbnf +6 -0
- package/cpp/llama.cpp/grammars/japanese.gbnf +7 -0
- package/cpp/llama.cpp/grammars/json.gbnf +25 -0
- package/cpp/llama.cpp/grammars/json_arr.gbnf +34 -0
- package/cpp/llama.cpp/grammars/list.gbnf +4 -0
- package/cpp/llama.cpp/include/llama-cpp.h +30 -0
- package/cpp/llama.cpp/include/llama.h +1440 -0
- package/cpp/llama.cpp/licenses/LICENSE-curl +9 -0
- package/cpp/llama.cpp/licenses/LICENSE-httplib +21 -0
- package/cpp/llama.cpp/licenses/LICENSE-jsonhpp +21 -0
- package/cpp/llama.cpp/licenses/LICENSE-linenoise +26 -0
- package/cpp/llama.cpp/media/llama0-banner.png +0 -0
- package/cpp/llama.cpp/media/llama0-logo.png +0 -0
- package/cpp/llama.cpp/media/llama1-banner.png +0 -0
- package/cpp/llama.cpp/media/llama1-logo.png +0 -0
- package/cpp/llama.cpp/media/llama1-logo.svg +34 -0
- package/cpp/llama.cpp/media/matmul.png +0 -0
- package/cpp/llama.cpp/media/matmul.svg +1238 -0
- package/cpp/llama.cpp/models/ggml-vocab-aquila.gguf +0 -0
- package/cpp/llama.cpp/models/ggml-vocab-baichuan.gguf +0 -0
- package/cpp/llama.cpp/models/ggml-vocab-bert-bge.gguf +0 -0
- package/cpp/llama.cpp/models/ggml-vocab-bert-bge.gguf.inp +112 -0
- package/cpp/llama.cpp/models/ggml-vocab-bert-bge.gguf.out +46 -0
- package/cpp/llama.cpp/models/ggml-vocab-chameleon.gguf.inp +112 -0
- package/cpp/llama.cpp/models/ggml-vocab-chameleon.gguf.out +46 -0
- package/cpp/llama.cpp/models/ggml-vocab-command-r.gguf +0 -0
- package/cpp/llama.cpp/models/ggml-vocab-command-r.gguf.inp +112 -0
- package/cpp/llama.cpp/models/ggml-vocab-command-r.gguf.out +46 -0
- package/cpp/llama.cpp/models/ggml-vocab-deepseek-coder.gguf +0 -0
- package/cpp/llama.cpp/models/ggml-vocab-deepseek-coder.gguf.inp +112 -0
- package/cpp/llama.cpp/models/ggml-vocab-deepseek-coder.gguf.out +46 -0
- package/cpp/llama.cpp/models/ggml-vocab-deepseek-llm.gguf +0 -0
- package/cpp/llama.cpp/models/ggml-vocab-deepseek-llm.gguf.inp +112 -0
- package/cpp/llama.cpp/models/ggml-vocab-deepseek-llm.gguf.out +46 -0
- package/cpp/llama.cpp/models/ggml-vocab-deepseek-r1-qwen.gguf.inp +112 -0
- package/cpp/llama.cpp/models/ggml-vocab-deepseek-r1-qwen.gguf.out +46 -0
- package/cpp/llama.cpp/models/ggml-vocab-falcon.gguf +0 -0
- package/cpp/llama.cpp/models/ggml-vocab-falcon.gguf.inp +112 -0
- package/cpp/llama.cpp/models/ggml-vocab-falcon.gguf.out +46 -0
- package/cpp/llama.cpp/models/ggml-vocab-gpt-2.gguf +0 -0
- package/cpp/llama.cpp/models/ggml-vocab-gpt-2.gguf.inp +112 -0
- package/cpp/llama.cpp/models/ggml-vocab-gpt-2.gguf.out +46 -0
- package/cpp/llama.cpp/models/ggml-vocab-gpt-4o.gguf.inp +112 -0
- package/cpp/llama.cpp/models/ggml-vocab-gpt-4o.gguf.out +46 -0
- package/cpp/llama.cpp/models/ggml-vocab-gpt-neox.gguf +0 -0
- package/cpp/llama.cpp/models/ggml-vocab-llama-bpe.gguf +0 -0
- package/cpp/llama.cpp/models/ggml-vocab-llama-bpe.gguf.inp +112 -0
- package/cpp/llama.cpp/models/ggml-vocab-llama-bpe.gguf.out +46 -0
- package/cpp/llama.cpp/models/ggml-vocab-llama-spm.gguf +0 -0
- package/cpp/llama.cpp/models/ggml-vocab-llama-spm.gguf.inp +112 -0
- package/cpp/llama.cpp/models/ggml-vocab-llama-spm.gguf.out +46 -0
- package/cpp/llama.cpp/models/ggml-vocab-llama4.gguf.inp +112 -0
- package/cpp/llama.cpp/models/ggml-vocab-llama4.gguf.out +46 -0
- package/cpp/llama.cpp/models/ggml-vocab-mpt.gguf +0 -0
- package/cpp/llama.cpp/models/ggml-vocab-mpt.gguf.inp +112 -0
- package/cpp/llama.cpp/models/ggml-vocab-mpt.gguf.out +46 -0
- package/cpp/llama.cpp/models/ggml-vocab-phi-3.gguf +0 -0
- package/cpp/llama.cpp/models/ggml-vocab-phi-3.gguf.inp +112 -0
- package/cpp/llama.cpp/models/ggml-vocab-phi-3.gguf.out +46 -0
- package/cpp/llama.cpp/models/ggml-vocab-pixtral.gguf.inp +112 -0
- package/cpp/llama.cpp/models/ggml-vocab-pixtral.gguf.out +46 -0
- package/cpp/llama.cpp/models/ggml-vocab-qwen2.gguf +0 -0
- package/cpp/llama.cpp/models/ggml-vocab-qwen2.gguf.inp +112 -0
- package/cpp/llama.cpp/models/ggml-vocab-qwen2.gguf.out +46 -0
- package/cpp/llama.cpp/models/ggml-vocab-refact.gguf +0 -0
- package/cpp/llama.cpp/models/ggml-vocab-refact.gguf.inp +112 -0
- package/cpp/llama.cpp/models/ggml-vocab-refact.gguf.out +46 -0
- package/cpp/llama.cpp/models/ggml-vocab-roberta-bpe.gguf.inp +112 -0
- package/cpp/llama.cpp/models/ggml-vocab-roberta-bpe.gguf.out +46 -0
- package/cpp/llama.cpp/models/ggml-vocab-starcoder.gguf +0 -0
- package/cpp/llama.cpp/models/ggml-vocab-starcoder.gguf.inp +112 -0
- package/cpp/llama.cpp/models/ggml-vocab-starcoder.gguf.out +46 -0
- package/cpp/llama.cpp/models/templates/CohereForAI-c4ai-command-r-plus-tool_use.jinja +202 -0
- package/cpp/llama.cpp/models/templates/CohereForAI-c4ai-command-r7b-12-2024-tool_use.jinja +156 -0
- package/cpp/llama.cpp/models/templates/NousResearch-Hermes-2-Pro-Llama-3-8B-tool_use.jinja +152 -0
- package/cpp/llama.cpp/models/templates/NousResearch-Hermes-3-Llama-3.1-8B-tool_use.jinja +152 -0
- package/cpp/llama.cpp/models/templates/Qwen-Qwen2.5-7B-Instruct.jinja +54 -0
- package/cpp/llama.cpp/models/templates/README.md +22 -0
- package/cpp/llama.cpp/models/templates/deepseek-ai-DeepSeek-R1-Distill-Llama-8B.jinja +1 -0
- package/cpp/llama.cpp/models/templates/deepseek-ai-DeepSeek-R1-Distill-Qwen-32B.jinja +1 -0
- package/cpp/llama.cpp/models/templates/fireworks-ai-llama-3-firefunction-v2.jinja +57 -0
- package/cpp/llama.cpp/models/templates/google-gemma-2-2b-it.jinja +4 -0
- package/cpp/llama.cpp/models/templates/llama-cpp-deepseek-r1.jinja +76 -0
- package/cpp/llama.cpp/models/templates/meetkai-functionary-medium-v3.1.jinja +58 -0
- package/cpp/llama.cpp/models/templates/meetkai-functionary-medium-v3.2.jinja +287 -0
- package/cpp/llama.cpp/models/templates/meta-llama-Llama-3.1-8B-Instruct.jinja +109 -0
- package/cpp/llama.cpp/models/templates/meta-llama-Llama-3.2-3B-Instruct.jinja +93 -0
- package/cpp/llama.cpp/models/templates/meta-llama-Llama-3.3-70B-Instruct.jinja +109 -0
- package/cpp/llama.cpp/models/templates/microsoft-Phi-3.5-mini-instruct.jinja +8 -0
- package/cpp/llama.cpp/models/templates/mistralai-Mistral-Nemo-Instruct-2407.jinja +87 -0
- package/cpp/llama.cpp/mypy.ini +7 -0
- package/cpp/llama.cpp/pocs/CMakeLists.txt +14 -0
- package/cpp/llama.cpp/pocs/vdot/CMakeLists.txt +9 -0
- package/cpp/llama.cpp/pocs/vdot/q8dot.cpp +173 -0
- package/cpp/llama.cpp/pocs/vdot/vdot.cpp +311 -0
- package/cpp/llama.cpp/poetry.lock +1197 -0
- package/cpp/llama.cpp/prompts/LLM-questions.txt +49 -0
- package/cpp/llama.cpp/prompts/alpaca.txt +1 -0
- package/cpp/llama.cpp/prompts/assistant.txt +31 -0
- package/cpp/llama.cpp/prompts/chat-with-baichuan.txt +4 -0
- package/cpp/llama.cpp/prompts/chat-with-bob.txt +7 -0
- package/cpp/llama.cpp/prompts/chat-with-qwen.txt +1 -0
- package/cpp/llama.cpp/prompts/chat-with-vicuna-v0.txt +7 -0
- package/cpp/llama.cpp/prompts/chat-with-vicuna-v1.txt +7 -0
- package/cpp/llama.cpp/prompts/chat.txt +28 -0
- package/cpp/llama.cpp/prompts/dan-modified.txt +1 -0
- package/cpp/llama.cpp/prompts/dan.txt +1 -0
- package/cpp/llama.cpp/prompts/mnemonics.txt +93 -0
- package/cpp/llama.cpp/prompts/parallel-questions.txt +43 -0
- package/cpp/llama.cpp/prompts/reason-act.txt +18 -0
- package/cpp/llama.cpp/pyproject.toml +45 -0
- package/cpp/llama.cpp/pyrightconfig.json +22 -0
- package/cpp/llama.cpp/requirements/requirements-all.txt +15 -0
- package/cpp/llama.cpp/requirements/requirements-compare-llama-bench.txt +2 -0
- package/cpp/llama.cpp/requirements/requirements-convert_hf_to_gguf.txt +3 -0
- package/cpp/llama.cpp/requirements/requirements-convert_hf_to_gguf_update.txt +3 -0
- package/cpp/llama.cpp/requirements/requirements-convert_legacy_llama.txt +5 -0
- package/cpp/llama.cpp/requirements/requirements-convert_llama_ggml_to_gguf.txt +1 -0
- package/cpp/llama.cpp/requirements/requirements-convert_lora_to_gguf.txt +2 -0
- package/cpp/llama.cpp/requirements/requirements-gguf_editor_gui.txt +3 -0
- package/cpp/llama.cpp/requirements/requirements-pydantic.txt +3 -0
- package/cpp/llama.cpp/requirements/requirements-test-tokenizer-random.txt +1 -0
- package/cpp/llama.cpp/requirements/requirements-tool_bench.txt +12 -0
- package/cpp/llama.cpp/requirements.txt +13 -0
- package/cpp/llama.cpp/src/CMakeLists.txt +45 -0
- package/cpp/llama.cpp/src/llama-adapter.cpp +388 -0
- package/cpp/llama.cpp/src/llama-adapter.h +76 -0
- package/cpp/llama.cpp/src/llama-arch.cpp +1743 -0
- package/cpp/llama.cpp/src/llama-arch.h +437 -0
- package/cpp/llama.cpp/src/llama-batch.cpp +372 -0
- package/cpp/llama.cpp/src/llama-batch.h +89 -0
- package/cpp/llama.cpp/src/llama-chat.cpp +663 -0
- package/cpp/llama.cpp/src/llama-chat.h +58 -0
- package/cpp/llama.cpp/src/llama-context.cpp +2459 -0
- package/cpp/llama.cpp/src/llama-context.h +246 -0
- package/cpp/llama.cpp/src/llama-cparams.cpp +1 -0
- package/cpp/llama.cpp/src/llama-cparams.h +39 -0
- package/cpp/llama.cpp/src/llama-grammar.cpp +1219 -0
- package/cpp/llama.cpp/src/llama-grammar.h +173 -0
- package/cpp/llama.cpp/src/llama-graph.cpp +1713 -0
- package/cpp/llama.cpp/src/llama-graph.h +595 -0
- package/cpp/llama.cpp/src/llama-hparams.cpp +79 -0
- package/cpp/llama.cpp/src/llama-hparams.h +161 -0
- package/cpp/llama.cpp/src/llama-impl.cpp +167 -0
- package/cpp/llama.cpp/src/llama-impl.h +61 -0
- package/cpp/llama.cpp/src/llama-io.cpp +15 -0
- package/cpp/llama.cpp/src/llama-io.h +35 -0
- package/cpp/llama.cpp/src/llama-kv-cache.cpp +2486 -0
- package/cpp/llama.cpp/src/llama-kv-cache.h +405 -0
- package/cpp/llama.cpp/src/llama-memory.cpp +1 -0
- package/cpp/llama.cpp/src/llama-memory.h +31 -0
- package/cpp/llama.cpp/src/llama-mmap.cpp +600 -0
- package/cpp/llama.cpp/src/llama-mmap.h +68 -0
- package/cpp/llama.cpp/src/llama-model-loader.cpp +1133 -0
- package/cpp/llama.cpp/src/llama-model-loader.h +169 -0
- package/cpp/llama.cpp/src/llama-model.cpp +13453 -0
- package/cpp/llama.cpp/src/llama-model.h +420 -0
- package/cpp/llama.cpp/src/llama-quant.cpp +964 -0
- package/cpp/llama.cpp/src/llama-quant.h +1 -0
- package/cpp/llama.cpp/src/llama-sampling.cpp +2575 -0
- package/cpp/llama.cpp/src/llama-sampling.h +32 -0
- package/cpp/llama.cpp/src/llama-vocab.cpp +3313 -0
- package/cpp/llama.cpp/src/llama-vocab.h +125 -0
- package/cpp/llama.cpp/src/llama.cpp +340 -0
- package/cpp/llama.cpp/src/unicode-data.cpp +7034 -0
- package/cpp/llama.cpp/src/unicode-data.h +20 -0
- package/cpp/llama.cpp/src/unicode.cpp +849 -0
- package/cpp/llama.cpp/src/unicode.h +66 -0
- package/cpp/rn-completion.cpp +431 -0
- package/cpp/rn-llama.hpp +60 -0
- package/cpp/rn-utils.hpp +331 -0
- package/ios/OnLoad.mm +22 -0
- package/ios/generated/RNLlamaCppSpec/RNLlamaCppSpec-generated.mm +64 -0
- package/ios/generated/RNLlamaCppSpec/RNLlamaCppSpec.h +251 -0
- package/ios/generated/RNLlamaCppSpecJSI-generated.cpp +42 -0
- package/ios/generated/RNLlamaCppSpecJSI.h +336 -0
- package/ios/include/chat.h +135 -0
- package/ios/include/common/base64.hpp +392 -0
- package/ios/include/common/json.hpp +24766 -0
- package/ios/include/common/minja/chat-template.hpp +537 -0
- package/ios/include/common/minja/minja.hpp +2941 -0
- package/ios/include/common.h +668 -0
- package/ios/include/json-schema-to-grammar.h +21 -0
- package/ios/include/llama-cpp.h +30 -0
- package/ios/include/llama.h +1440 -0
- package/ios/include/log.h +103 -0
- package/ios/include/ngram-cache.h +101 -0
- package/ios/include/sampling.h +107 -0
- package/ios/include/speculative.h +28 -0
- package/ios/libs/llama.xcframework/Info.plist +135 -0
- package/ios/libs/llama.xcframework/ios-arm64/dSYMs/llama.dSYM/Contents/Info.plist +20 -0
- package/ios/libs/llama.xcframework/ios-arm64/dSYMs/llama.dSYM/Contents/Resources/DWARF/llama +0 -0
- package/ios/libs/llama.xcframework/ios-arm64/dSYMs/llama.dSYM/Contents/Resources/Relocations/aarch64/llama.yml +4492 -0
- package/ios/libs/llama.xcframework/ios-arm64/llama.framework/Headers/ggml-alloc.h +76 -0
- package/ios/libs/llama.xcframework/ios-arm64/llama.framework/Headers/ggml-backend.h +354 -0
- package/ios/libs/llama.xcframework/ios-arm64/llama.framework/Headers/ggml-blas.h +25 -0
- package/ios/libs/llama.xcframework/ios-arm64/llama.framework/Headers/ggml-cpu.h +143 -0
- package/ios/libs/llama.xcframework/ios-arm64/llama.framework/Headers/ggml-metal.h +66 -0
- package/ios/libs/llama.xcframework/ios-arm64/llama.framework/Headers/ggml.h +2192 -0
- package/ios/libs/llama.xcframework/ios-arm64/llama.framework/Headers/gguf.h +202 -0
- package/ios/libs/llama.xcframework/ios-arm64/llama.framework/Headers/llama.h +1440 -0
- package/ios/libs/llama.xcframework/ios-arm64/llama.framework/Info.plist +36 -0
- package/ios/libs/llama.xcframework/ios-arm64/llama.framework/Modules/module.modulemap +17 -0
- package/ios/libs/llama.xcframework/ios-arm64/llama.framework/llama +0 -0
- package/ios/libs/llama.xcframework/ios-arm64_x86_64-simulator/dSYMs/llama.dSYM/Contents/Info.plist +20 -0
- package/ios/libs/llama.xcframework/ios-arm64_x86_64-simulator/dSYMs/llama.dSYM/Contents/Resources/DWARF/llama +0 -0
- package/ios/libs/llama.xcframework/ios-arm64_x86_64-simulator/dSYMs/llama.dSYM/Contents/Resources/Relocations/aarch64/llama.yml +4513 -0
- package/ios/libs/llama.xcframework/ios-arm64_x86_64-simulator/dSYMs/llama.dSYM/Contents/Resources/Relocations/x86_64/llama.yml +3440 -0
- package/ios/libs/llama.xcframework/ios-arm64_x86_64-simulator/llama.framework/Headers/ggml-alloc.h +76 -0
- package/ios/libs/llama.xcframework/ios-arm64_x86_64-simulator/llama.framework/Headers/ggml-backend.h +354 -0
- package/ios/libs/llama.xcframework/ios-arm64_x86_64-simulator/llama.framework/Headers/ggml-blas.h +25 -0
- package/ios/libs/llama.xcframework/ios-arm64_x86_64-simulator/llama.framework/Headers/ggml-cpu.h +143 -0
- package/ios/libs/llama.xcframework/ios-arm64_x86_64-simulator/llama.framework/Headers/ggml-metal.h +66 -0
- package/ios/libs/llama.xcframework/ios-arm64_x86_64-simulator/llama.framework/Headers/ggml.h +2192 -0
- package/ios/libs/llama.xcframework/ios-arm64_x86_64-simulator/llama.framework/Headers/gguf.h +202 -0
- package/ios/libs/llama.xcframework/ios-arm64_x86_64-simulator/llama.framework/Headers/llama.h +1440 -0
- package/ios/libs/llama.xcframework/ios-arm64_x86_64-simulator/llama.framework/Info.plist +36 -0
- package/ios/libs/llama.xcframework/ios-arm64_x86_64-simulator/llama.framework/Modules/module.modulemap +17 -0
- package/ios/libs/llama.xcframework/ios-arm64_x86_64-simulator/llama.framework/llama +0 -0
- package/ios/libs/llama.xcframework/macos-arm64_x86_64/dSYMs/llama.dSYM/Contents/Info.plist +20 -0
- package/ios/libs/llama.xcframework/macos-arm64_x86_64/dSYMs/llama.dSYM/Contents/Resources/DWARF/llama +0 -0
- package/ios/libs/llama.xcframework/macos-arm64_x86_64/dSYMs/llama.dSYM/Contents/Resources/Relocations/aarch64/llama.yml +4513 -0
- package/ios/libs/llama.xcframework/macos-arm64_x86_64/dSYMs/llama.dSYM/Contents/Resources/Relocations/x86_64/llama.yml +3442 -0
- package/ios/libs/llama.xcframework/macos-arm64_x86_64/llama.framework/Headers/ggml-alloc.h +76 -0
- package/ios/libs/llama.xcframework/macos-arm64_x86_64/llama.framework/Headers/ggml-backend.h +354 -0
- package/ios/libs/llama.xcframework/macos-arm64_x86_64/llama.framework/Headers/ggml-blas.h +25 -0
- package/ios/libs/llama.xcframework/macos-arm64_x86_64/llama.framework/Headers/ggml-cpu.h +143 -0
- package/ios/libs/llama.xcframework/macos-arm64_x86_64/llama.framework/Headers/ggml-metal.h +66 -0
- package/ios/libs/llama.xcframework/macos-arm64_x86_64/llama.framework/Headers/ggml.h +2192 -0
- package/ios/libs/llama.xcframework/macos-arm64_x86_64/llama.framework/Headers/gguf.h +202 -0
- package/ios/libs/llama.xcframework/macos-arm64_x86_64/llama.framework/Headers/llama.h +1440 -0
- package/ios/libs/llama.xcframework/macos-arm64_x86_64/llama.framework/Modules/module.modulemap +17 -0
- package/ios/libs/llama.xcframework/macos-arm64_x86_64/llama.framework/Resources/Info.plist +32 -0
- package/ios/libs/llama.xcframework/macos-arm64_x86_64/llama.framework/Versions/A/Headers/ggml-alloc.h +76 -0
- package/ios/libs/llama.xcframework/macos-arm64_x86_64/llama.framework/Versions/A/Headers/ggml-backend.h +354 -0
- package/ios/libs/llama.xcframework/macos-arm64_x86_64/llama.framework/Versions/A/Headers/ggml-blas.h +25 -0
- package/ios/libs/llama.xcframework/macos-arm64_x86_64/llama.framework/Versions/A/Headers/ggml-cpu.h +143 -0
- package/ios/libs/llama.xcframework/macos-arm64_x86_64/llama.framework/Versions/A/Headers/ggml-metal.h +66 -0
- package/ios/libs/llama.xcframework/macos-arm64_x86_64/llama.framework/Versions/A/Headers/ggml.h +2192 -0
- package/ios/libs/llama.xcframework/macos-arm64_x86_64/llama.framework/Versions/A/Headers/gguf.h +202 -0
- package/ios/libs/llama.xcframework/macos-arm64_x86_64/llama.framework/Versions/A/Headers/llama.h +1440 -0
- package/ios/libs/llama.xcframework/macos-arm64_x86_64/llama.framework/Versions/A/Modules/module.modulemap +17 -0
- package/ios/libs/llama.xcframework/macos-arm64_x86_64/llama.framework/Versions/A/Resources/Info.plist +32 -0
- package/ios/libs/llama.xcframework/macos-arm64_x86_64/llama.framework/Versions/A/llama +0 -0
- package/ios/libs/llama.xcframework/macos-arm64_x86_64/llama.framework/Versions/Current/Headers/ggml-alloc.h +76 -0
- package/ios/libs/llama.xcframework/macos-arm64_x86_64/llama.framework/Versions/Current/Headers/ggml-backend.h +354 -0
- package/ios/libs/llama.xcframework/macos-arm64_x86_64/llama.framework/Versions/Current/Headers/ggml-blas.h +25 -0
- package/ios/libs/llama.xcframework/macos-arm64_x86_64/llama.framework/Versions/Current/Headers/ggml-cpu.h +143 -0
- package/ios/libs/llama.xcframework/macos-arm64_x86_64/llama.framework/Versions/Current/Headers/ggml-metal.h +66 -0
- package/ios/libs/llama.xcframework/macos-arm64_x86_64/llama.framework/Versions/Current/Headers/ggml.h +2192 -0
- package/ios/libs/llama.xcframework/macos-arm64_x86_64/llama.framework/Versions/Current/Headers/gguf.h +202 -0
- package/ios/libs/llama.xcframework/macos-arm64_x86_64/llama.framework/Versions/Current/Headers/llama.h +1440 -0
- package/ios/libs/llama.xcframework/macos-arm64_x86_64/llama.framework/Versions/Current/Modules/module.modulemap +17 -0
- package/ios/libs/llama.xcframework/macos-arm64_x86_64/llama.framework/Versions/Current/Resources/Info.plist +32 -0
- package/ios/libs/llama.xcframework/macos-arm64_x86_64/llama.framework/Versions/Current/llama +0 -0
- package/ios/libs/llama.xcframework/macos-arm64_x86_64/llama.framework/llama +0 -0
- package/ios/libs/llama.xcframework/tvos-arm64/dSYMs/llama.dSYM/Contents/Info.plist +20 -0
- package/ios/libs/llama.xcframework/tvos-arm64/dSYMs/llama.dSYM/Contents/Resources/DWARF/llama +0 -0
- package/ios/libs/llama.xcframework/tvos-arm64/dSYMs/llama.dSYM/Contents/Resources/Relocations/aarch64/llama.yml +4492 -0
- package/ios/libs/llama.xcframework/tvos-arm64/llama.framework/Headers/ggml-alloc.h +76 -0
- package/ios/libs/llama.xcframework/tvos-arm64/llama.framework/Headers/ggml-backend.h +354 -0
- package/ios/libs/llama.xcframework/tvos-arm64/llama.framework/Headers/ggml-blas.h +25 -0
- package/ios/libs/llama.xcframework/tvos-arm64/llama.framework/Headers/ggml-cpu.h +143 -0
- package/ios/libs/llama.xcframework/tvos-arm64/llama.framework/Headers/ggml-metal.h +66 -0
- package/ios/libs/llama.xcframework/tvos-arm64/llama.framework/Headers/ggml.h +2192 -0
- package/ios/libs/llama.xcframework/tvos-arm64/llama.framework/Headers/gguf.h +202 -0
- package/ios/libs/llama.xcframework/tvos-arm64/llama.framework/Headers/llama.h +1440 -0
- package/ios/libs/llama.xcframework/tvos-arm64/llama.framework/Info.plist +35 -0
- package/ios/libs/llama.xcframework/tvos-arm64/llama.framework/Modules/module.modulemap +17 -0
- package/ios/libs/llama.xcframework/tvos-arm64/llama.framework/llama +0 -0
- package/ios/libs/llama.xcframework/tvos-arm64_x86_64-simulator/dSYMs/llama.dSYM/Contents/Info.plist +20 -0
- package/ios/libs/llama.xcframework/tvos-arm64_x86_64-simulator/dSYMs/llama.dSYM/Contents/Resources/DWARF/llama +0 -0
- package/ios/libs/llama.xcframework/tvos-arm64_x86_64-simulator/dSYMs/llama.dSYM/Contents/Resources/Relocations/aarch64/llama.yml +4513 -0
- package/ios/libs/llama.xcframework/tvos-arm64_x86_64-simulator/dSYMs/llama.dSYM/Contents/Resources/Relocations/x86_64/llama.yml +3440 -0
- package/ios/libs/llama.xcframework/tvos-arm64_x86_64-simulator/llama.framework/Headers/ggml-alloc.h +76 -0
- package/ios/libs/llama.xcframework/tvos-arm64_x86_64-simulator/llama.framework/Headers/ggml-backend.h +354 -0
- package/ios/libs/llama.xcframework/tvos-arm64_x86_64-simulator/llama.framework/Headers/ggml-blas.h +25 -0
- package/ios/libs/llama.xcframework/tvos-arm64_x86_64-simulator/llama.framework/Headers/ggml-cpu.h +143 -0
- package/ios/libs/llama.xcframework/tvos-arm64_x86_64-simulator/llama.framework/Headers/ggml-metal.h +66 -0
- package/ios/libs/llama.xcframework/tvos-arm64_x86_64-simulator/llama.framework/Headers/ggml.h +2192 -0
- package/ios/libs/llama.xcframework/tvos-arm64_x86_64-simulator/llama.framework/Headers/gguf.h +202 -0
- package/ios/libs/llama.xcframework/tvos-arm64_x86_64-simulator/llama.framework/Headers/llama.h +1440 -0
- package/ios/libs/llama.xcframework/tvos-arm64_x86_64-simulator/llama.framework/Info.plist +35 -0
- package/ios/libs/llama.xcframework/tvos-arm64_x86_64-simulator/llama.framework/Modules/module.modulemap +17 -0
- package/ios/libs/llama.xcframework/tvos-arm64_x86_64-simulator/llama.framework/llama +0 -0
- package/ios/libs/llama.xcframework/xros-arm64/dSYMs/llama.dSYM/Contents/Info.plist +20 -0
- package/ios/libs/llama.xcframework/xros-arm64/dSYMs/llama.dSYM/Contents/Resources/DWARF/llama +0 -0
- package/ios/libs/llama.xcframework/xros-arm64/dSYMs/llama.dSYM/Contents/Resources/Relocations/aarch64/llama.yml +4528 -0
- package/ios/libs/llama.xcframework/xros-arm64/llama.framework/Headers/ggml-alloc.h +76 -0
- package/ios/libs/llama.xcframework/xros-arm64/llama.framework/Headers/ggml-backend.h +354 -0
- package/ios/libs/llama.xcframework/xros-arm64/llama.framework/Headers/ggml-blas.h +25 -0
- package/ios/libs/llama.xcframework/xros-arm64/llama.framework/Headers/ggml-cpu.h +143 -0
- package/ios/libs/llama.xcframework/xros-arm64/llama.framework/Headers/ggml-metal.h +66 -0
- package/ios/libs/llama.xcframework/xros-arm64/llama.framework/Headers/ggml.h +2192 -0
- package/ios/libs/llama.xcframework/xros-arm64/llama.framework/Headers/gguf.h +202 -0
- package/ios/libs/llama.xcframework/xros-arm64/llama.framework/Headers/llama.h +1440 -0
- package/ios/libs/llama.xcframework/xros-arm64/llama.framework/Info.plist +32 -0
- package/ios/libs/llama.xcframework/xros-arm64/llama.framework/Modules/module.modulemap +17 -0
- package/ios/libs/llama.xcframework/xros-arm64/llama.framework/llama +0 -0
- package/ios/libs/llama.xcframework/xros-arm64_x86_64-simulator/dSYMs/llama.dSYM/Contents/Info.plist +20 -0
- package/ios/libs/llama.xcframework/xros-arm64_x86_64-simulator/dSYMs/llama.dSYM/Contents/Resources/DWARF/llama +0 -0
- package/ios/libs/llama.xcframework/xros-arm64_x86_64-simulator/dSYMs/llama.dSYM/Contents/Resources/Relocations/aarch64/llama.yml +4549 -0
- package/ios/libs/llama.xcframework/xros-arm64_x86_64-simulator/dSYMs/llama.dSYM/Contents/Resources/Relocations/x86_64/llama.yml +3470 -0
- package/ios/libs/llama.xcframework/xros-arm64_x86_64-simulator/llama.framework/Headers/ggml-alloc.h +76 -0
- package/ios/libs/llama.xcframework/xros-arm64_x86_64-simulator/llama.framework/Headers/ggml-backend.h +354 -0
- package/ios/libs/llama.xcframework/xros-arm64_x86_64-simulator/llama.framework/Headers/ggml-blas.h +25 -0
- package/ios/libs/llama.xcframework/xros-arm64_x86_64-simulator/llama.framework/Headers/ggml-cpu.h +143 -0
- package/ios/libs/llama.xcframework/xros-arm64_x86_64-simulator/llama.framework/Headers/ggml-metal.h +66 -0
- package/ios/libs/llama.xcframework/xros-arm64_x86_64-simulator/llama.framework/Headers/ggml.h +2192 -0
- package/ios/libs/llama.xcframework/xros-arm64_x86_64-simulator/llama.framework/Headers/gguf.h +202 -0
- package/ios/libs/llama.xcframework/xros-arm64_x86_64-simulator/llama.framework/Headers/llama.h +1440 -0
- package/ios/libs/llama.xcframework/xros-arm64_x86_64-simulator/llama.framework/Info.plist +32 -0
- package/ios/libs/llama.xcframework/xros-arm64_x86_64-simulator/llama.framework/Modules/module.modulemap +17 -0
- package/ios/libs/llama.xcframework/xros-arm64_x86_64-simulator/llama.framework/llama +0 -0
- package/lib/module/NativeRNLlamaCpp.js +35 -0
- package/lib/module/NativeRNLlamaCpp.js.map +1 -0
- package/lib/module/index.js +20 -0
- package/lib/module/index.js.map +1 -0
- package/lib/module/package.json +1 -0
- package/lib/typescript/package.json +1 -0
- package/lib/typescript/src/NativeRNLlamaCpp.d.ts +222 -0
- package/lib/typescript/src/NativeRNLlamaCpp.d.ts.map +1 -0
- package/lib/typescript/src/index.d.ts +5 -0
- package/lib/typescript/src/index.d.ts.map +1 -0
- package/package.json +161 -0
- package/react-native.config.js +15 -0
- package/src/NativeRNLlamaCpp.ts +282 -0
- package/src/index.tsx +54 -0
@@ -0,0 +1,854 @@
+#include "ggml-opt.h"
+
+#include "ggml.h"
+#include "ggml-alloc.h"
+#include "ggml-backend.h"
+#include "ggml-impl.h"
+
+#include <algorithm>
+#include <cmath>
+#include <cstdint>
+#include <cinttypes>
+#include <map>
+#include <random>
+#include <vector>
+
+struct ggml_opt_dataset {
+    struct ggml_context * ctx    = nullptr;
+    ggml_backend_buffer_t buf    = nullptr;
+    struct ggml_tensor  * data   = nullptr;
+    struct ggml_tensor  * labels = nullptr;
+
+    int64_t ndata       = -1;
+    int64_t ndata_shard = -1;
+    size_t  nbs_data    = -1;
+    size_t  nbs_labels  = -1;
+
+    std::vector<int64_t> permutation;
+};
+
+struct ggml_opt_context {
+    ggml_backend_sched_t    backend_sched        = nullptr;
+    ggml_cgraph           * allocated_graph      = nullptr;
+    ggml_cgraph           * allocated_graph_copy = nullptr;
+    struct ggml_context   * ctx_static           = nullptr;
+    struct ggml_context   * ctx_static_cpu       = nullptr;
+    struct ggml_context   * ctx_compute          = nullptr;
+    struct ggml_context   * ctx_copy             = nullptr;
+    ggml_backend_buffer_t   buf_static           = nullptr;
+    ggml_backend_buffer_t   buf_static_cpu       = nullptr;
+    std::mt19937 rng;
+
+    struct ggml_tensor * inputs  = nullptr;
+    struct ggml_tensor * outputs = nullptr;
+    struct ggml_tensor * labels  = nullptr;
+
+    struct ggml_tensor * loss     = nullptr;
+    struct ggml_tensor * pred     = nullptr;
+    struct ggml_tensor * ncorrect = nullptr;
+
+    struct ggml_cgraph * gf      = nullptr;
+    struct ggml_cgraph * gb_grad = nullptr;
+    struct ggml_cgraph * gb_opt  = nullptr;
+
+    int64_t iter       = 1;
+    int32_t opt_period = 1;
+    int32_t opt_i      = 0;
+    bool loss_per_datapoint = false;
+
+    ggml_opt_get_optimizer_params get_opt_pars    = nullptr;
+    void                        * get_opt_pars_ud = nullptr;
+    struct ggml_tensor          * adamw_params    = nullptr;
+};
+
+struct ggml_opt_result {
+    int64_t              ndata = 0;
+    std::vector<float>   loss;
+    std::vector<int32_t> pred;
+    int64_t              ncorrect = 0;
+
+    int64_t opt_period         = -1;
+    bool    loss_per_datapoint = false;
+};
+
+// ====== Dataset ======
+
+ggml_opt_dataset_t ggml_opt_dataset_init(int64_t ne_datapoint, int64_t ne_label, int64_t ndata, int64_t ndata_shard) {
+    GGML_ASSERT(ne_datapoint >  0);
+    GGML_ASSERT(ne_label     >= 0);
+    GGML_ASSERT(ndata        >  0);
+    GGML_ASSERT(ndata_shard  >  0);
+
+    ggml_opt_dataset_t result = new ggml_opt_dataset;
+    result->ndata       = ndata;
+    result->ndata_shard = ndata_shard;
+
+    {
+        struct ggml_init_params params = {
+            /*.mem_size   =*/ 2*ggml_tensor_overhead(),
+            /*.mem_buffer =*/ nullptr,
+            /*.no_alloc   =*/ true,
+        };
+        result->ctx = ggml_init(params);
+    }
+
+    result->data = ggml_new_tensor_2d(result->ctx, GGML_TYPE_F32, ne_datapoint, ndata);
+    result->nbs_data = ggml_nbytes(result->data) * ndata_shard/ndata;
+
+    if (ne_label > 0) {
+        result->labels = ggml_new_tensor_2d(result->ctx, GGML_TYPE_F32, ne_label, ndata);
+        result->nbs_labels = ggml_nbytes(result->labels) * ndata_shard/ndata;
+    } else {
+        result->labels = nullptr;
+        result->nbs_labels = 0;
+    }
+
+    result->buf = ggml_backend_alloc_ctx_tensors_from_buft(result->ctx, ggml_backend_cpu_buffer_type());
+
+    const int64_t nshards = ndata/ndata_shard;
+    result->permutation.resize(nshards);
+    for (int64_t i = 0; i < nshards; ++i) {
+        result->permutation[i] = i;
+    }
+    return result;
+}
+
+void ggml_opt_dataset_free(ggml_opt_dataset_t dataset) {
+    ggml_backend_buffer_free(dataset->buf);
+    ggml_free(dataset->ctx);
+    delete dataset;
+}
+
+struct ggml_tensor * ggml_opt_dataset_data(ggml_opt_dataset_t dataset) {
+    return dataset->data;
+}
+
+struct ggml_tensor * ggml_opt_dataset_labels(ggml_opt_dataset_t dataset) {
+    return dataset->labels;
+}
+
+void ggml_opt_dataset_shuffle(ggml_opt_context_t opt_ctx, ggml_opt_dataset_t dataset, int64_t idata) {
+    GGML_ASSERT(idata <= dataset->ndata);
+
+    if (idata < 0) {
+        std::shuffle(dataset->permutation.begin(), dataset->permutation.end(), opt_ctx->rng);
+        return;
+    }
+
+    GGML_ASSERT(idata % dataset->ndata_shard == 0);
+    const int64_t ishard_max = idata / dataset->ndata_shard;
+    std::shuffle(dataset->permutation.begin(), dataset->permutation.begin() + ishard_max, opt_ctx->rng);
+}
+
+void ggml_opt_dataset_get_batch(ggml_opt_dataset_t dataset, struct ggml_tensor * data_batch, struct ggml_tensor * labels_batch, int64_t ibatch) {
+    GGML_ASSERT(  data_batch && ggml_is_contiguous(data_batch));
+    GGML_ASSERT(!labels_batch || ggml_is_contiguous(labels_batch));
+    GGML_ASSERT((labels_batch == nullptr) == (dataset->labels == nullptr));
+
+    const size_t nb_data_batch = ggml_nbytes(data_batch);
+    GGML_ASSERT(nb_data_batch % dataset->nbs_data == 0);
+    const int64_t shards_per_batch = nb_data_batch / dataset->nbs_data;
+
+    if (labels_batch) {
+        const size_t nb_labels_batch = ggml_nbytes(labels_batch);
+        GGML_ASSERT(nb_labels_batch == shards_per_batch*dataset->nbs_labels);
+    }
+
+    GGML_ASSERT((ibatch + 1)*shards_per_batch <= int64_t(dataset->permutation.size()));
+
+    for (int64_t ishard_batch = 0; ishard_batch < shards_per_batch; ++ishard_batch) {
+        const int64_t ishard = dataset->permutation[ibatch*shards_per_batch + ishard_batch];
+
+        const char * ptr_data = (const char *) dataset->data->data + ishard*dataset->nbs_data;
+        ggml_backend_tensor_set(data_batch, ptr_data, ishard_batch*dataset->nbs_data, dataset->nbs_data);
+
+        if (!labels_batch) {
+            continue;
+        }
+
+        const char * ptr_labels = (const char *) dataset->labels->data + ishard*dataset->nbs_labels;
+        ggml_backend_tensor_set(labels_batch, ptr_labels, ishard_batch*dataset->nbs_labels, dataset->nbs_labels);
+    }
+}
+
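For orientation, a minimal usage sketch of the dataset API above. The shapes are illustrative assumptions, and `inputs`/`labels` stand for the statically allocated batch tensors of an optimization context (see `ggml_opt_inputs`/`ggml_opt_labels` further down):

    // Hypothetical sizes: 60000 datapoints of 784 floats each, one-hot labels
    // over 10 classes, one datapoint per shard so shuffling permutes datapoints.
    ggml_opt_dataset_t dataset = ggml_opt_dataset_init(
        /*ne_datapoint =*/ 784, /*ne_label =*/ 10, /*ndata =*/ 60000, /*ndata_shard =*/ 1);
    // ... fill ggml_opt_dataset_data(dataset) and ggml_opt_dataset_labels(dataset) ...
    ggml_opt_dataset_get_batch(dataset, inputs, labels, /*ibatch =*/ 0); // copy physical batch 0
    ggml_opt_dataset_free(dataset);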
+// ====== Model / Context ======
+
+struct ggml_opt_optimizer_params ggml_opt_get_default_optimizer_params(void * userdata) {
+    GGML_UNUSED(userdata);
+
+    ggml_opt_optimizer_params result;
+
+    result.adamw.alpha = 0.001f;
+    result.adamw.beta1 = 0.9f;
+    result.adamw.beta2 = 0.999f;
+    result.adamw.eps   = 1e-8f;
+    result.adamw.wd    = 0.0f;
+
+    return result;
+}
+
+struct ggml_opt_params ggml_opt_default_params(
+        ggml_backend_sched_t      backend_sched,
+        struct ggml_context     * ctx_compute,
+        struct ggml_tensor      * inputs,
+        struct ggml_tensor      * outputs,
+        enum ggml_opt_loss_type   loss_type) {
+    return {
+        /*backend_sched   =*/ backend_sched,
+        /*ctx_compute     =*/ ctx_compute,
+        /*inputs          =*/ inputs,
+        /*logits          =*/ outputs,
+        /*loss_type       =*/ loss_type,
+        /*build_type      =*/ GGML_OPT_BUILD_TYPE_OPT,
+        /*opt_period      =*/ 1,
+        /*get_opt_pars    =*/ ggml_opt_get_default_optimizer_params,
+        /*get_opt_pars_ud =*/ nullptr,
+    };
+}
+
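A sketch of a custom optimizer-parameter callback. `ggml_opt_fit` below passes a pointer to the current epoch as `userdata`, so a schedule can key off it; the decay rule here is an illustrative assumption, not something this package ships:

    static struct ggml_opt_optimizer_params lr_schedule(void * userdata) {
        struct ggml_opt_optimizer_params pars = ggml_opt_get_default_optimizer_params(nullptr);
        const int64_t epoch = *(const int64_t *) userdata;              // set by ggml_opt_fit
        pars.adamw.alpha = 0.001f / (1.0f + 0.1f*float(epoch - 1));     // assumed 1/t decay
        return pars;
    }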
+static ggml_tensor * map_tensor(std::map<ggml_tensor *, ggml_tensor *> & tensor_map, ggml_context * ctx, ggml_tensor * tensor) {
+    if (!tensor) {
+        return nullptr;
+    }
+
+    if (tensor_map.find(tensor) != tensor_map.end()) {
+        return tensor_map[tensor];
+    }
+
+    ggml_tensor * new_tensor = ggml_dup_tensor(ctx, tensor);
+    tensor_map[tensor] = new_tensor;
+
+    new_tensor->op = tensor->op;
+    for (int i = 0; i < GGML_MAX_DIMS; i++) {
+        new_tensor->nb[i] = tensor->nb[i];
+    }
+    new_tensor->flags = tensor->flags;
+    memcpy(new_tensor->op_params, tensor->op_params, sizeof(tensor->op_params));
+    strcpy(new_tensor->name, tensor->name);
+    new_tensor->data      = tensor->data;
+    new_tensor->buffer    = tensor->buffer;
+    new_tensor->extra     = tensor->extra;
+    new_tensor->view_offs = tensor->view_offs;
+    new_tensor->view_src  = map_tensor(tensor_map, ctx, tensor->view_src);
+    for (int i = 0; i < GGML_MAX_SRC; i++) {
+        new_tensor->src[i] = map_tensor(tensor_map, ctx, tensor->src[i]);
+    }
+
+    return new_tensor;
+}
+
+static ggml_cgraph * dup_graph(ggml_context * ctx, ggml_cgraph * src) {
+    std::map<ggml_tensor *, ggml_tensor *> tensor_map;
+
+    ggml_cgraph * dst = ggml_new_graph_custom(ctx, src->size, /*grads =*/ true);
+
+    for (int i = 0; i < src->n_leafs; i++) {
+        ggml_build_forward_expand(dst, map_tensor(tensor_map, ctx, src->leafs[i]));
+    }
+    GGML_ASSERT(dst->n_leafs == src->n_leafs);
+    for (int i = 0; i < src->n_nodes; i++) {
+        ggml_build_forward_expand(dst, map_tensor(tensor_map, ctx, src->nodes[i]));
+    }
+    GGML_ASSERT(dst->n_nodes == src->n_nodes);
+    for (int i = 0; i < src->n_nodes; ++i) {
+        const size_t igrad_src = ggml_hash_find(&src->visited_hash_set, src->nodes[i]);
+        const size_t igrad_dst = ggml_hash_find(&dst->visited_hash_set, dst->nodes[i]);
+
+        GGML_ASSERT(igrad_src != GGML_HASHSET_FULL);
+        GGML_ASSERT(ggml_bitset_get(src->visited_hash_set.used, igrad_src));
+        GGML_ASSERT(igrad_dst != GGML_HASHSET_FULL);
+        GGML_ASSERT(ggml_bitset_get(dst->visited_hash_set.used, igrad_dst));
+
+        dst->grads[igrad_dst]     = src->grads[igrad_src];
+        dst->grad_accs[igrad_dst] = src->grad_accs[igrad_src];
+    }
+
+    return dst;
+}
+
+static void ggml_opt_alloc_graph(ggml_opt_context_t opt_ctx, ggml_cgraph * graph) {
+    GGML_ASSERT(graph);
+    if (opt_ctx->allocated_graph == graph) {
+        return;
+    }
+
+    ggml_backend_sched_reset(opt_ctx->backend_sched); // clear allocation of previous graph
+
+    {
+        ggml_init_params params = {
+            /*.mem_size   =*/ ggml_tensor_overhead() * GGML_DEFAULT_GRAPH_SIZE,
+            /*.mem_buffer =*/ nullptr,
+            /*.no_alloc   =*/ true,
+        };
+        ggml_free(opt_ctx->ctx_copy);
+        opt_ctx->ctx_copy = ggml_init(params);
+    }
+
+    opt_ctx->allocated_graph_copy = dup_graph(opt_ctx->ctx_copy, graph);
+
+    ggml_backend_sched_alloc_graph(opt_ctx->backend_sched, opt_ctx->allocated_graph_copy);
+    opt_ctx->allocated_graph = graph;
+}
+
+ggml_opt_context_t ggml_opt_init(struct ggml_opt_params params) {
+    ggml_opt_context_t result = new struct ggml_opt_context;
+    result->backend_sched   = params.backend_sched;
+    result->ctx_compute     = params.ctx_compute;
+    result->inputs          = params.inputs;
+    result->outputs         = params.outputs;
+    result->opt_period      = params.opt_period;
+    result->get_opt_pars    = params.get_opt_pars;
+    result->get_opt_pars_ud = params.get_opt_pars_ud;
+
+    GGML_ASSERT(result->inputs->data && "the inputs must be allocated statically");
+    GGML_ASSERT(result->opt_period >= 1);
+
+    const bool accumulate = params.build_type == GGML_OPT_BUILD_TYPE_GRAD ||
+        (params.build_type == GGML_OPT_BUILD_TYPE_OPT && result->opt_period > 1);
+
+    ggml_set_input(result->inputs);
+    ggml_set_output(result->outputs);
+
+    result->gf = ggml_new_graph_custom(result->ctx_compute, GGML_DEFAULT_GRAPH_SIZE, /*grads =*/ true); // Forward pass.
+    ggml_build_forward_expand(result->gf, result->outputs);
+
+    int n_param = 0;
+    for (int i = 0; i < result->gf->n_nodes; ++i) {
+        if (result->gf->nodes[i]->flags & GGML_TENSOR_FLAG_PARAM) {
+            n_param++;
+        }
+    }
+
+    {
+        // The static context is used for:
+        //   - gradients (1 tensor per param if using gradient accumulation)
+        //   - optimizer momenta (2 tensors per param)
+        //   - labels
+        //   - loss + its gradient (up to 5 tensors)
+        //   - pred
+        //   - ncorrect (2 tensors).
+        const size_t tensors_per_param = (accumulate ? 1 : 0) + (params.build_type == GGML_OPT_BUILD_TYPE_OPT ? 2 : 0);
+        const size_t size_meta = (tensors_per_param*n_param + 9) * ggml_tensor_overhead();
+        struct ggml_init_params params = {
+            /*.mem_size   =*/ size_meta,
+            /*.mem_buffer =*/ nullptr,
+            /*.no_alloc   =*/ true,
+        };
+        result->ctx_static = ggml_init(params);
+    }
+    {
+        // The static cpu context is used for:
+        //   - optimizer parameters (1 for the entire context)
+        const size_t size_meta = 1 * ggml_tensor_overhead();
+        struct ggml_init_params params = {
+            /*.mem_size   =*/ size_meta,
+            /*.mem_buffer =*/ nullptr,
+            /*.no_alloc   =*/ true,
+        };
+        result->ctx_static_cpu = ggml_init(params);
+    }
+
+
+    switch (params.loss_type) {
+        case GGML_OPT_LOSS_TYPE_MEAN: {
+            result->loss = ggml_sum(result->ctx_static, result->outputs);
+            ggml_set_name(result->loss, "loss_sum");
+            const float scale = 1.0f / (result->opt_period * ggml_nelements(result->outputs));
+            result->loss = ggml_scale(result->ctx_static, result->loss, scale);
+            ggml_set_name(result->loss, "loss_mean");
+            result->loss_per_datapoint = true;
+            break;
+        }
+        case GGML_OPT_LOSS_TYPE_SUM: {
+            result->loss = ggml_sum(result->ctx_static, result->outputs);
+            ggml_set_name(result->loss, "loss_sum");
+            result->loss_per_datapoint = false;
+            break;
+        }
+        case GGML_OPT_LOSS_TYPE_CROSS_ENTROPY: {
+            result->labels = ggml_dup_tensor(result->ctx_static, result->outputs);
+            ggml_set_input(result->labels);
+            ggml_set_name(result->labels, "labels");
+            result->loss = ggml_cross_entropy_loss(result->ctx_static, result->outputs, result->labels);
+            ggml_set_name(result->loss, "loss_cross_entropy");
+            if (result->opt_period > 1) {
+                result->loss = ggml_scale(result->ctx_static, result->loss, 1.0f / result->opt_period);
+                ggml_set_name(result->loss, "loss_cross_entropy_scaled");
+            }
+            result->loss_per_datapoint = true;
+            break;
+        }
+        case GGML_OPT_LOSS_TYPE_MEAN_SQUARED_ERROR: {
+            result->labels = ggml_dup_tensor(result->ctx_static, result->outputs);
+            ggml_set_input(result->labels);
+            ggml_set_name(result->labels, "labels");
+            result->loss = ggml_sub(result->ctx_static, result->outputs, result->labels);
+            ggml_set_name(result->loss, "loss_error");
+            result->loss = ggml_sqr(result->ctx_static, result->loss);
+            ggml_set_name(result->loss, "loss_squared_error");
+            result->loss = ggml_sum(result->ctx_static, result->loss);
+            ggml_set_name(result->loss, "loss_sum_squared_error");
+            const float scale = 1.0f / (result->opt_period * ggml_nelements(result->outputs));
+            result->loss = ggml_scale(result->ctx_static, result->loss, scale);
+            ggml_set_name(result->loss, "loss_mean_squared_error");
+            result->loss_per_datapoint = true;
+            break;
+        }
+    }
+    ggml_set_output(result->loss);
+    ggml_set_loss(result->loss);
+    ggml_build_forward_expand(result->gf, result->loss);
+
+    result->pred = ggml_argmax(result->ctx_static, result->outputs);
+    ggml_set_name(result->pred, "pred");
+    ggml_set_output(result->pred);
+    ggml_build_forward_expand(result->gf, result->pred);
+
+    if (result->labels) {
+        result->ncorrect = ggml_count_equal(result->ctx_static, result->pred, ggml_argmax(result->ctx_static, result->labels));
+        ggml_set_name(result->ncorrect, "ncorrect");
+        ggml_set_output(result->ncorrect);
+        ggml_build_forward_expand(result->gf, result->ncorrect);
+    } else {
+        result->ncorrect = nullptr;
+    }
+
+    if (params.build_type == GGML_OPT_BUILD_TYPE_FORWARD) {
+        result->buf_static = ggml_backend_alloc_ctx_tensors(result->ctx_static, ggml_backend_sched_get_backend(result->backend_sched, 0));
+        return result;
+    }
+
+    // gb_grad == graph backward gradients, forward pass, then backward pass to calculate gradients.
+    result->gb_grad = ggml_graph_dup(result->ctx_compute, result->gf);
+    ggml_build_backward_expand(result->ctx_static, result->ctx_compute, result->gb_grad, accumulate);
+
+    if (params.build_type == GGML_OPT_BUILD_TYPE_GRAD) {
+        result->buf_static = ggml_backend_alloc_ctx_tensors(result->ctx_static, ggml_backend_sched_get_backend(result->backend_sched, 0));
+        ggml_graph_reset(result->gb_grad);
+        return result;
+    }
+
+    GGML_ASSERT(params.build_type == GGML_OPT_BUILD_TYPE_OPT);
+
+    // gb_opt == graph backward optimize, forward pass, then backward pass to calculate gradients, then optimizer step.
+    result->gb_opt = ggml_graph_dup(result->ctx_compute, result->gb_grad);
+
+    result->adamw_params = ggml_new_tensor_1d(result->ctx_static_cpu, GGML_TYPE_F32, 7);
+    ggml_set_input(result->adamw_params);
+    ggml_set_name(result->adamw_params, "adamw_params");
+
+    for (int i = result->gf->n_nodes-1; i >= 0; --i) {
+        struct ggml_tensor * node = result->gb_opt->nodes[i];
+        struct ggml_tensor * grad = ggml_graph_get_grad(result->gb_opt, node);
+
+        if (node->flags & GGML_TENSOR_FLAG_PARAM) {
+            struct ggml_tensor * m        = ggml_dup_tensor(result->ctx_static, node);
+            struct ggml_tensor * v        = ggml_dup_tensor(result->ctx_static, node);
+            struct ggml_tensor * opt_step = ggml_opt_step_adamw(result->ctx_compute, node, grad, m, v, result->adamw_params);
+            ggml_build_forward_expand(result->gb_opt, opt_step);
+        }
+    }
+
+    result->buf_static = ggml_backend_alloc_ctx_tensors(
+        result->ctx_static, ggml_backend_sched_get_backend(result->backend_sched, 0));
+
+    result->buf_static_cpu = ggml_backend_alloc_ctx_tensors_from_buft(result->ctx_static_cpu, ggml_backend_cpu_buffer_type());
+
+    ggml_graph_reset(result->gb_opt);
+
+    return result;
+}
+
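The `build_type` chosen above decides how much machinery `ggml_opt_init` constructs: `GGML_OPT_BUILD_TYPE_FORWARD` stops after the forward graph, `GGML_OPT_BUILD_TYPE_GRAD` adds the backward pass, and `GGML_OPT_BUILD_TYPE_OPT` additionally appends the AdamW step per parameter. A sketch of a forward-only (evaluation) context, where `sched`, `ctx_compute`, `inputs` and `outputs` are assumed to come from the caller's model code:

    ggml_opt_params params = ggml_opt_default_params(
        sched, ctx_compute, inputs, outputs, GGML_OPT_LOSS_TYPE_CROSS_ENTROPY);
    params.build_type = GGML_OPT_BUILD_TYPE_FORWARD; // no gradients, no optimizer state
    ggml_opt_context_t opt_ctx = ggml_opt_init(params);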
+void ggml_opt_free(ggml_opt_context_t opt_ctx) {
+    if (opt_ctx == nullptr) {
+        return;
+    }
+    ggml_backend_buffer_free(opt_ctx->buf_static);
+    ggml_backend_buffer_free(opt_ctx->buf_static_cpu);
+    ggml_free(opt_ctx->ctx_static);
+    ggml_free(opt_ctx->ctx_static_cpu);
+    delete opt_ctx;
+}
+
+void ggml_opt_reset(ggml_opt_context_t opt_ctx, bool optimizer) {
+    if (optimizer) {
+        ggml_graph_reset(opt_ctx->gb_opt);
+        opt_ctx->iter = 1;
+    } else {
+        ggml_graph_reset(opt_ctx->gb_grad);
+    }
+}
+
+struct ggml_tensor * ggml_opt_inputs(ggml_opt_context_t opt_ctx) {
+    return opt_ctx->inputs;
+}
+
+struct ggml_tensor * ggml_opt_outputs(ggml_opt_context_t opt_ctx) {
+    return opt_ctx->outputs;
+}
+
+struct ggml_tensor * ggml_opt_labels(ggml_opt_context_t opt_ctx) {
+    return opt_ctx->labels;
+}
+
+struct ggml_tensor * ggml_opt_loss(ggml_opt_context_t opt_ctx) {
+    return opt_ctx->loss;
+}
+
+struct ggml_tensor * ggml_opt_pred(ggml_opt_context_t opt_ctx) {
+    return opt_ctx->pred;
+}
+
+struct ggml_tensor * ggml_opt_ncorrect(ggml_opt_context_t opt_ctx) {
+    return opt_ctx->ncorrect;
+}
+
+struct ggml_tensor * ggml_opt_grad_acc(ggml_opt_context_t opt_ctx, struct ggml_tensor * node) {
+    return ggml_graph_get_grad_acc(opt_ctx->gb_opt, node);
+}
+
+// ====== Optimization Result ======
+
+ggml_opt_result_t ggml_opt_result_init() {
+    return new ggml_opt_result;
+}
+
+void ggml_opt_result_free(ggml_opt_result_t result) {
+    delete result;
+}
+
+void ggml_opt_result_reset(ggml_opt_result_t result) {
+    result->ndata = 0;
+    result->loss.clear();
+    result->pred.clear();
+    result->ncorrect = 0;
+}
+
+void ggml_opt_result_ndata(ggml_opt_result_t result, int64_t * ndata) {
+    *ndata = result->ndata;
+}
+
+void ggml_opt_result_loss(ggml_opt_result_t result, double * loss, double * unc) {
+    const int64_t nbatches = result->loss.size(); // Number of physical batches.
+
+    if (nbatches == 0) {
+        *loss = 0.0;
+        *unc  = NAN;
+        return;
+    }
+
+    double sum         = 0.0;
+    double sum_squared = 0.0;
+
+    for (const float & loss : result->loss) {
+        // If the loss is per datapoint it was scaled by 1.0f/opt_period for each physical batch.
+        const float loss_scaled = result->loss_per_datapoint ? loss*result->opt_period : loss;
+        sum         += loss_scaled;
+        sum_squared += loss_scaled*loss_scaled;
+    }
+
+    const double mean = sum/nbatches;
+    *loss = result->loss_per_datapoint ? mean : sum;
+
+    if (!unc) {
+        return;
+    }
+
+    if (nbatches < 2) {
+        *unc = NAN;
+        return;
+    }
+
+    const double var_sum = sum_squared/nbatches - mean*mean; // variance without Bessel's correction, i.e. nbatches/(nbatches-1)
+    *unc = result->loss_per_datapoint ? sqrt(var_sum / (nbatches - 1)) : sqrt(var_sum * nbatches/(nbatches - 1));
+}
+
+void ggml_opt_result_pred(ggml_opt_result_t result, int32_t * pred) {
+    for (size_t i = 0; i < result->pred.size(); ++i) {
+        pred[i] = result->pred[i];
+    }
+}
+
+void ggml_opt_result_accuracy(ggml_opt_result_t result, double * accuracy, double * unc) {
+    *accuracy = result->ncorrect >= 0 ? double(result->ncorrect) / double(result->ndata) : NAN;
+
+    if (!unc) {
+        return;
+    }
+
+    *unc = result->ncorrect >= 0 && result->ndata >= 2 ?
+        sqrt((*accuracy) * (1.0 - (*accuracy)) / double(result->ndata - 1)) : NAN;
+}
+
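A sketch of reading aggregate metrics back out of a result object, e.g. after an epoch; all four outputs are plain doubles:

    double loss, loss_unc, accuracy, accuracy_unc;
    ggml_opt_result_loss(result, &loss, &loss_unc);             // mean or sum, depending on loss type
    ggml_opt_result_accuracy(result, &accuracy, &accuracy_unc); // NAN when no labels were set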
+// ====== Computation ======
+
+static void ggml_opt_eval_graph(ggml_opt_context_t opt_ctx, ggml_cgraph * graph, ggml_opt_result * result) {
+    if (graph != opt_ctx->gf) {
+        struct ggml_opt_optimizer_params opt_pars = opt_ctx->get_opt_pars(opt_ctx->get_opt_pars_ud);
+
+        GGML_ASSERT(opt_pars.adamw.alpha >  0.0f);
+        GGML_ASSERT(opt_pars.adamw.beta1 >= 0.0f);
+        GGML_ASSERT(opt_pars.adamw.beta1 <= 1.0f);
+        GGML_ASSERT(opt_pars.adamw.beta2 >= 0.0f);
+        GGML_ASSERT(opt_pars.adamw.beta2 <= 1.0f);
+        GGML_ASSERT(opt_pars.adamw.eps   >= 0.0f);
+        GGML_ASSERT(opt_pars.adamw.wd    >= 0.0f);
+        GGML_ASSERT(opt_pars.adamw.wd    <= 1.0f);
+
+        // beta1, beta2 after applying warmup
+        const float beta1h = 1.0f/(1.0f - powf(opt_pars.adamw.beta1, opt_ctx->iter));
+        const float beta2h = 1.0f/(1.0f - powf(opt_pars.adamw.beta2, opt_ctx->iter));
+
+        float * adamw_par_data = ggml_get_data_f32(opt_ctx->adamw_params);
+        adamw_par_data[0] = opt_pars.adamw.alpha;
+        adamw_par_data[1] = opt_pars.adamw.beta1;
+        adamw_par_data[2] = opt_pars.adamw.beta2;
+        adamw_par_data[3] = opt_pars.adamw.eps;
+        adamw_par_data[4] = opt_pars.adamw.wd;
+        adamw_par_data[5] = beta1h;
+        adamw_par_data[6] = beta2h;
+    }
+
+    ggml_opt_alloc_graph(opt_ctx, graph);
+    ggml_backend_sched_graph_compute(opt_ctx->backend_sched, opt_ctx->allocated_graph_copy);
+    opt_ctx->iter += opt_ctx->allocated_graph == opt_ctx->gb_opt;
+
+    if (!result) {
+        return;
+    }
+
+    if (result->ndata == 0) {
+        result->loss_per_datapoint = opt_ctx->loss_per_datapoint;
+        result->opt_period         = opt_ctx->opt_period;
+    } else {
+        GGML_ASSERT(result->loss_per_datapoint == opt_ctx->loss_per_datapoint);
+        GGML_ASSERT(result->opt_period         == opt_ctx->opt_period);
+    }
+
+    const int64_t ndata = opt_ctx->outputs->ne[1];
+    GGML_ASSERT(result->ndata == ndata*int64_t(result->loss.size()) && "varying batch size not supported");
+    result->ndata += ndata;
+
+    GGML_ASSERT(ggml_is_scalar(opt_ctx->loss));
+    GGML_ASSERT(opt_ctx->loss->type == GGML_TYPE_F32);
+    float loss;
+    ggml_backend_tensor_get(opt_ctx->loss, &loss, 0, ggml_nbytes(opt_ctx->loss));
+    result->loss.push_back(loss);
+
+    GGML_ASSERT(opt_ctx->pred->type == GGML_TYPE_I32);
+    std::vector<int32_t> pred(ndata);
+    ggml_backend_tensor_get(opt_ctx->pred, pred.data(), 0, ggml_nbytes(opt_ctx->pred));
+    result->pred.insert(result->pred.end(), pred.begin(), pred.end());
+
+    if (!opt_ctx->labels || result->ncorrect < 0) {
+        result->ncorrect = -1;
+        return;
+    }
+
+    GGML_ASSERT(ggml_is_scalar(opt_ctx->ncorrect));
+    GGML_ASSERT(opt_ctx->ncorrect->type == GGML_TYPE_I64);
+    int64_t ncorrect;
+    ggml_backend_tensor_get(opt_ctx->ncorrect, &ncorrect, 0, ggml_nbytes(opt_ctx->ncorrect));
+    result->ncorrect += ncorrect;
+}
+
+void ggml_opt_forward(ggml_opt_context_t opt_ctx, ggml_opt_result * result) {
+    ggml_opt_eval_graph(opt_ctx, opt_ctx->gf, result);
+}
+
+void ggml_opt_forward_backward(ggml_opt_context_t opt_ctx, ggml_opt_result * result) {
+    if (opt_ctx->opt_period == 1) {
+        ggml_opt_eval_graph(opt_ctx, opt_ctx->gb_opt, result);
+        return;
+    }
+
+    const int32_t opt_i_next = (opt_ctx->opt_i + 1) % opt_ctx->opt_period;
+    if (opt_i_next == 0) {
+        ggml_opt_eval_graph(opt_ctx, opt_ctx->gb_opt, result);
+        ggml_opt_reset(opt_ctx, /*optimizer =*/ false);
+    } else {
+        ggml_opt_eval_graph(opt_ctx, opt_ctx->gb_grad, result);
+    }
+    opt_ctx->opt_i = opt_i_next;
+}
+
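With `opt_period > 1`, `ggml_opt_forward_backward` implements gradient accumulation: for `opt_period = 4`, three consecutive calls evaluate `gb_grad` and only accumulate gradients, while the fourth evaluates `gb_opt` (the AdamW step) and then resets the accumulated gradients. A sketch of the resulting training loop, with `nbatches`, `inputs` and `labels` assumed from the surrounding code:

    for (int64_t ibatch = 0; ibatch < nbatches; ++ibatch) {
        ggml_opt_dataset_get_batch(dataset, inputs, labels, ibatch);
        ggml_opt_forward_backward(opt_ctx, result_train); // optimizer step on every 4th call
    }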
+// ====== High-Level Functions ======
+
+void ggml_opt_epoch(
+        ggml_opt_context_t      opt_ctx,
+        ggml_opt_dataset_t      dataset,
+        ggml_opt_result_t       result_train,
+        ggml_opt_result_t       result_eval,
+        int64_t                 idata_split,
+        ggml_opt_epoch_callback callback_train,
+        ggml_opt_epoch_callback callback_eval) {
+    struct ggml_tensor * inputs = ggml_opt_inputs(opt_ctx);
+    struct ggml_tensor * labels = ggml_opt_labels(opt_ctx);
+    struct ggml_tensor * data   = ggml_opt_dataset_data(dataset);
+    GGML_ASSERT(data->ne[0] == inputs->ne[0]);
+
+    const int64_t ndata       = data->ne[1];
+    const int64_t ndata_batch = inputs->ne[1];
+
+    GGML_ASSERT(data->ne[1] % inputs->ne[1] == 0);
+    const int64_t nbatches = ndata/ndata_batch;
+
+    idata_split = idata_split < 0 ? ndata : idata_split;
+    GGML_ASSERT(idata_split % ndata_batch == 0);
+    const int64_t ibatch_split = idata_split / ndata_batch;
+
+    int64_t ibatch = 0;
+    int64_t t_loop_start = ggml_time_us();
+    for (; ibatch < ibatch_split; ++ibatch) {
+        ggml_opt_dataset_get_batch(dataset, inputs, labels, ibatch);
+        ggml_opt_forward_backward(opt_ctx, result_train);
+        if (callback_train) {
+            callback_train(true, opt_ctx, dataset, result_train, ibatch+1, ibatch_split, t_loop_start);
+        }
+    }
+    t_loop_start = ggml_time_us();
+    for (; ibatch < nbatches; ++ibatch) {
+        ggml_opt_dataset_get_batch(dataset, inputs, labels, ibatch);
+        ggml_opt_forward(opt_ctx, result_eval);
+        if (callback_eval) {
+            callback_eval(false, opt_ctx, dataset, result_eval, ibatch+1-ibatch_split, nbatches-ibatch_split, t_loop_start);
+        }
+    }
+}
+
+void ggml_opt_epoch_callback_progress_bar(
+        bool               train,
+        ggml_opt_context_t opt_ctx,
+        ggml_opt_dataset_t dataset,
+        ggml_opt_result_t  result,
+        int64_t            ibatch,
+        int64_t            ibatch_max,
+        int64_t            t_start_us) {
+    fprintf(stderr, "%s[", train ? "train: " : "val: ");
+
+    constexpr int64_t bar_length = 25;
+    for (int64_t j = 0; j < bar_length; ++j) {
+        const int64_t ibatch_j = ibatch_max * j/bar_length;
+        if (ibatch_j < ibatch) {
+            fprintf(stderr, "=");
+        } else if (ibatch_max * (j - 1)/bar_length < ibatch) {
+            fprintf(stderr, ">");
+        } else {
+            fprintf(stderr, " ");
+        }
+    }
+
+    const int64_t batch_size = ggml_opt_inputs(opt_ctx)->ne[1];
+    const int64_t idata      = ibatch*batch_size;
+    const int64_t idata_max  = ibatch_max*batch_size;
+
+    double loss;
+    double loss_unc;
+    ggml_opt_result_loss(result, &loss, &loss_unc);
+
+    double accuracy;
+    double accuracy_unc;
+    ggml_opt_result_accuracy(result, &accuracy, &accuracy_unc);
+
+    const int64_t t_ibatch_us = ggml_time_us() - t_start_us;
+    int64_t t_ibatch_s = t_ibatch_us / 1000000;
+    const int64_t t_ibatch_h = t_ibatch_s / 3600;
+    t_ibatch_s -= t_ibatch_h * 3600;
+    const int64_t t_ibatch_m = t_ibatch_s / 60;
+    t_ibatch_s -= t_ibatch_m * 60;
+
+    const int64_t t_eta_us = t_ibatch_us * (ibatch_max - ibatch)/ibatch;
+    int64_t t_eta_s = t_eta_us / 1000000;
+    const int64_t t_eta_h = t_eta_s / 3600;
+    t_eta_s -= t_eta_h * 3600;
+    const int64_t t_eta_m = t_eta_s / 60;
+    t_eta_s -= t_eta_m * 60;
+
+    fprintf(stderr, "| data=%06" PRId64 "/%06" PRId64 ", loss=%.6lf+-%.6lf, accuracy=%.2lf+-%.2lf%%, "
+            "t=%02" PRId64 ":%02" PRId64 ":%02" PRId64 ", ETA=%02" PRId64 ":%02" PRId64 ":%02" PRId64 "]\r",
+            idata, idata_max, loss, loss_unc, 100.0*accuracy, 100.0*accuracy_unc,
+            t_ibatch_h, t_ibatch_m, t_ibatch_s, t_eta_h, t_eta_m, t_eta_s);
+    if (ibatch == ibatch_max) {
+        fprintf(stderr, "\n");
+    }
+    fflush(stderr);
+
+    GGML_UNUSED(dataset);
+}
+
+void ggml_opt_fit(
+        ggml_backend_sched_t          backend_sched,
+        ggml_context                * ctx_compute,
+        ggml_tensor                 * inputs,
+        ggml_tensor                 * outputs,
+        ggml_opt_dataset_t            dataset,
+        enum ggml_opt_loss_type       loss_type,
+        ggml_opt_get_optimizer_params get_opt_pars,
+        int64_t                       nepoch,
+        int64_t                       nbatch_logical,
+        float                         val_split,
+        bool                          silent) {
+    ggml_time_init();
+    const int64_t t_start_us = ggml_time_us();
+
+    const int64_t ndata           = ggml_opt_dataset_data(dataset)->ne[1];
+    const int64_t nbatch_physical = inputs->ne[1];
+    GGML_ASSERT(ndata          % nbatch_logical  == 0);
+    GGML_ASSERT(nbatch_logical % nbatch_physical == 0);
+
+    const int64_t opt_period       = nbatch_logical / nbatch_physical;
+    const int64_t nbatches_logical = ndata / nbatch_logical;
+
+    GGML_ASSERT(val_split >= 0.0f);
+    GGML_ASSERT(val_split <  1.0f);
+    const int64_t ibatch_split = int64_t(((1.0f - val_split) * nbatches_logical)) * opt_period; // train <-> val split index (physical)
+    const int64_t idata_split  = ibatch_split * nbatch_physical;
+
+    int64_t epoch = 1;
+
+    ggml_opt_params params = ggml_opt_default_params(backend_sched, ctx_compute, inputs, outputs, loss_type);
+    params.opt_period      = opt_period;
+    params.get_opt_pars    = get_opt_pars;
+    params.get_opt_pars_ud = &epoch;
+    ggml_opt_context_t opt_ctx = ggml_opt_init(params);
+
+    // Shuffling the data is generally useful but there is only a point if not all data is used in a single batch.
+    if (nbatch_logical < ndata) {
+        ggml_opt_dataset_shuffle(opt_ctx, dataset, -1); // Shuffle all data (train + validation).
+    }
+
+    ggml_opt_result_t result_train = ggml_opt_result_init();
+    ggml_opt_result_t result_val   = ggml_opt_result_init();
+
+    ggml_opt_epoch_callback epoch_callback = silent ? nullptr : ggml_opt_epoch_callback_progress_bar;
+
+    for (; epoch <= nepoch; ++epoch) {
+        if (nbatch_logical < idata_split) {
+            ggml_opt_dataset_shuffle(opt_ctx, dataset, idata_split);
+        }
+
+        ggml_opt_result_reset(result_train);
+        ggml_opt_result_reset(result_val);
+
+        if (!silent) {
+            fprintf(stderr, "%s: epoch %04" PRId64 "/%04" PRId64 ":\n", __func__, epoch, nepoch);
+        }
+        ggml_opt_epoch(opt_ctx, dataset, result_train, result_val, idata_split, epoch_callback, epoch_callback);
+        if (!silent) {
+            fprintf(stderr, "\n");
+        }
+    }
+
+    if (!silent) {
+        int64_t t_total_s = (ggml_time_us() - t_start_us) / 1000000;
+        const int64_t t_total_h = t_total_s / 3600;
+        t_total_s -= t_total_h * 3600;
+        const int64_t t_total_m = t_total_s / 60;
+        t_total_s -= t_total_m * 60;
+        fprintf(stderr, "%s: training took %02" PRId64 ":%02" PRId64 ":%02" PRId64 "\n", __func__, t_total_h, t_total_m, t_total_s);
+    }
+
+    ggml_opt_free(opt_ctx);
+    ggml_opt_result_free(result_train);
+    ggml_opt_result_free(result_val);
+}
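To tie the pieces together, a hypothetical end-to-end call; `sched`, `ctx_compute`, `inputs` and `outputs` are assumed to be set up by the caller's model code, and the hyperparameters are illustrative:

    ggml_opt_fit(sched, ctx_compute, inputs, outputs, dataset,
                 GGML_OPT_LOSS_TYPE_CROSS_ENTROPY,
                 ggml_opt_get_default_optimizer_params,
                 /*nepoch         =*/ 10,
                 /*nbatch_logical =*/ 512,
                 /*val_split      =*/ 0.1f,
                 /*silent         =*/ false);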