cui-llama.rn 1.6.0 → 1.7.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +35 -7
- package/android/src/main/CMakeLists.txt +22 -11
- package/android/src/main/java/com/rnllama/LlamaContext.java +42 -6
- package/android/src/main/java/com/rnllama/RNLlama.java +139 -4
- package/android/src/main/jni.cpp +173 -18
- package/android/src/main/jniLibs/arm64-v8a/librnllama.so +0 -0
- package/android/src/main/jniLibs/arm64-v8a/librnllama_v8.so +0 -0
- package/android/src/main/jniLibs/arm64-v8a/librnllama_v8_2.so +0 -0
- package/android/src/main/jniLibs/arm64-v8a/librnllama_v8_2_dotprod.so +0 -0
- package/android/src/main/jniLibs/arm64-v8a/librnllama_v8_2_dotprod_i8mm.so +0 -0
- package/android/src/main/jniLibs/arm64-v8a/librnllama_v8_2_i8mm.so +0 -0
- package/android/src/main/jniLibs/x86_64/librnllama.so +0 -0
- package/android/src/main/jniLibs/x86_64/librnllama_x86_64.so +0 -0
- package/android/src/newarch/java/com/rnllama/RNLlamaModule.java +24 -4
- package/android/src/oldarch/java/com/rnllama/RNLlamaModule.java +22 -2
- package/cpp/LICENSE +21 -0
- package/cpp/chat.cpp +129 -107
- package/cpp/chat.h +2 -0
- package/cpp/common.cpp +58 -78
- package/cpp/common.h +29 -21
- package/cpp/ggml-alloc.c +4 -1
- package/cpp/ggml-backend.cpp +9 -5
- package/cpp/ggml-backend.h +4 -4
- package/cpp/ggml-cpp.h +1 -1
- package/cpp/ggml-cpu/amx/amx.cpp +221 -0
- package/cpp/ggml-cpu/amx/amx.h +8 -0
- package/cpp/ggml-cpu/amx/common.h +91 -0
- package/cpp/ggml-cpu/amx/mmq.cpp +2511 -0
- package/cpp/ggml-cpu/amx/mmq.h +10 -0
- package/{ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers → cpp/ggml-cpu}/binary-ops.h +1 -1
- package/cpp/ggml-cpu/common.h +72 -0
- package/cpp/{ggml-cpu-aarch64.cpp → ggml-cpu/ggml-cpu-aarch64.cpp} +809 -103
- package/cpp/{ggml-cpu-quants.c → ggml-cpu/ggml-cpu-quants.c} +306 -6
- package/cpp/{ggml-cpu.c → ggml-cpu/ggml-cpu.c} +114 -55
- package/cpp/{ggml-cpu.cpp → ggml-cpu/ggml-cpu.cpp} +32 -16
- package/cpp/{ops.cpp → ggml-cpu/ops.cpp} +353 -173
- package/{ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers → cpp/ggml-cpu}/ops.h +2 -20
- package/cpp/{sgemm.cpp → ggml-cpu/sgemm.cpp} +501 -0
- package/{ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers → cpp/ggml-cpu}/simd-mappings.h +7 -3
- package/{ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers → cpp/ggml-cpu}/unary-ops.h +1 -1
- package/cpp/{vec.cpp → ggml-cpu/vec.cpp} +0 -6
- package/{ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers → cpp/ggml-cpu}/vec.h +16 -0
- package/cpp/ggml-cpu.h +5 -0
- package/cpp/ggml-impl.h +16 -9
- package/cpp/ggml-llama-sim.metallib +0 -0
- package/cpp/ggml-llama.metallib +0 -0
- package/cpp/ggml-metal-impl.h +36 -11
- package/cpp/ggml-metal.m +810 -176
- package/cpp/ggml-opt.cpp +373 -190
- package/cpp/ggml-opt.h +49 -28
- package/cpp/ggml-quants.c +0 -6
- package/cpp/ggml.c +227 -282
- package/cpp/ggml.h +82 -101
- package/cpp/gguf.cpp +33 -33
- package/cpp/json-schema-to-grammar.cpp +3 -0
- package/cpp/llama-adapter.cpp +6 -0
- package/cpp/llama-arch.cpp +49 -17
- package/cpp/llama-arch.h +9 -0
- package/cpp/llama-batch.cpp +8 -2
- package/cpp/llama-batch.h +2 -1
- package/cpp/llama-chat.cpp +39 -16
- package/cpp/llama-chat.h +4 -2
- package/cpp/llama-context.cpp +440 -611
- package/cpp/llama-context.h +44 -33
- package/cpp/llama-cparams.h +1 -0
- package/cpp/llama-graph.cpp +214 -291
- package/cpp/llama-graph.h +69 -21
- package/cpp/llama-hparams.cpp +17 -1
- package/cpp/llama-hparams.h +39 -5
- package/cpp/llama-kv-cache.cpp +2067 -620
- package/cpp/llama-kv-cache.h +410 -108
- package/cpp/llama-memory.h +12 -1
- package/cpp/llama-model-loader.cpp +24 -15
- package/cpp/llama-model-saver.cpp +281 -0
- package/cpp/llama-model-saver.h +37 -0
- package/cpp/llama-model.cpp +1089 -359
- package/cpp/llama-model.h +19 -3
- package/cpp/llama-sampling.cpp +20 -7
- package/cpp/llama-vocab.cpp +54 -9
- package/cpp/llama-vocab.h +6 -0
- package/cpp/llama.cpp +14 -0
- package/cpp/llama.h +86 -142
- package/cpp/minja/chat-template.hpp +9 -5
- package/cpp/minja/minja.hpp +69 -36
- package/cpp/rn-llama.cpp +602 -190
- package/cpp/rn-llama.h +34 -8
- package/cpp/sampling.cpp +57 -50
- package/cpp/tools/mtmd/clip-impl.h +462 -0
- package/cpp/tools/mtmd/clip.cpp +4024 -0
- package/cpp/tools/mtmd/clip.h +101 -0
- package/cpp/tools/mtmd/miniaudio.h +93468 -0
- package/cpp/tools/mtmd/mtmd-audio.cpp +855 -0
- package/cpp/tools/mtmd/mtmd-audio.h +62 -0
- package/cpp/tools/mtmd/mtmd-helper.cpp +297 -0
- package/cpp/tools/mtmd/mtmd.cpp +942 -0
- package/cpp/tools/mtmd/mtmd.h +362 -0
- package/cpp/tools/mtmd/stb_image.h +7988 -0
- package/ios/CMakeLists.txt +20 -10
- package/ios/RNLlama.h +6 -0
- package/ios/RNLlama.mm +82 -3
- package/ios/RNLlamaContext.h +5 -1
- package/ios/RNLlamaContext.mm +131 -38
- package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/chat.h +2 -0
- package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/common.h +29 -21
- package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/ggml-backend.h +4 -4
- package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/ggml-cpp.h +1 -1
- package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/ggml-cpu.h +5 -0
- package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/ggml-impl.h +16 -9
- package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/ggml-metal-impl.h +36 -11
- package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/ggml-opt.h +49 -28
- package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/ggml.h +82 -101
- package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/llama-arch.h +9 -0
- package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/llama-batch.h +2 -1
- package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/llama-chat.h +4 -2
- package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/llama-context.h +44 -33
- package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/llama-cparams.h +1 -0
- package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/llama-graph.h +69 -21
- package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/llama-hparams.h +39 -5
- package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/llama-kv-cache.h +410 -108
- package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/llama-memory.h +12 -1
- package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/llama-model-saver.h +37 -0
- package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/llama-model.h +19 -3
- package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/llama-vocab.h +6 -0
- package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/llama.h +86 -142
- package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/minja/chat-template.hpp +9 -5
- package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/minja/minja.hpp +69 -36
- package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/rn-llama.h +34 -8
- package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Info.plist +0 -0
- package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/ggml-llama.metallib +0 -0
- package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/rnllama +0 -0
- package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/chat.h +2 -0
- package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/common.h +29 -21
- package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/ggml-backend.h +4 -4
- package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/ggml-cpp.h +1 -1
- package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/ggml-cpu.h +5 -0
- package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/ggml-impl.h +16 -9
- package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/ggml-metal-impl.h +36 -11
- package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/ggml-opt.h +49 -28
- package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/ggml.h +82 -101
- package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/llama-arch.h +9 -0
- package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/llama-batch.h +2 -1
- package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/llama-chat.h +4 -2
- package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/llama-context.h +44 -33
- package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/llama-cparams.h +1 -0
- package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/llama-graph.h +69 -21
- package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/llama-hparams.h +39 -5
- package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/llama-kv-cache.h +410 -108
- package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/llama-memory.h +12 -1
- package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/llama-model-saver.h +37 -0
- package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/llama-model.h +19 -3
- package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/llama-vocab.h +6 -0
- package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/llama.h +86 -142
- package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/minja/chat-template.hpp +9 -5
- package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/minja/minja.hpp +69 -36
- package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/rn-llama.h +34 -8
- package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Info.plist +0 -0
- package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/_CodeSignature/CodeResources +1 -1
- package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/ggml-llama-sim.metallib +0 -0
- package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/rnllama +0 -0
- package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/chat.h +2 -0
- package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/common.h +29 -21
- package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/ggml-backend.h +4 -4
- package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/ggml-cpp.h +1 -1
- package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/ggml-cpu.h +5 -0
- package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/ggml-impl.h +16 -9
- package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/ggml-metal-impl.h +36 -11
- package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/ggml-opt.h +49 -28
- package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/ggml.h +82 -101
- package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/llama-arch.h +9 -0
- package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/llama-batch.h +2 -1
- package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/llama-chat.h +4 -2
- package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/llama-context.h +44 -33
- package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/llama-cparams.h +1 -0
- package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/llama-graph.h +69 -21
- package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/llama-hparams.h +39 -5
- package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/llama-kv-cache.h +410 -108
- package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/llama-memory.h +12 -1
- package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/llama-model-saver.h +37 -0
- package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/llama-model.h +19 -3
- package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/llama-vocab.h +6 -0
- package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/llama.h +86 -142
- package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/minja/chat-template.hpp +9 -5
- package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/minja/minja.hpp +69 -36
- package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/rn-llama.h +34 -8
- package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Info.plist +0 -0
- package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/ggml-llama.metallib +0 -0
- package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/rnllama +0 -0
- package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/chat.h +2 -0
- package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/common.h +29 -21
- package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/ggml-backend.h +4 -4
- package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/ggml-cpp.h +1 -1
- package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/ggml-cpu.h +5 -0
- package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/ggml-impl.h +16 -9
- package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/ggml-metal-impl.h +36 -11
- package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/ggml-opt.h +49 -28
- package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/ggml.h +82 -101
- package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/llama-arch.h +9 -0
- package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/llama-batch.h +2 -1
- package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/llama-chat.h +4 -2
- package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/llama-context.h +44 -33
- package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/llama-cparams.h +1 -0
- package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/llama-graph.h +69 -21
- package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/llama-hparams.h +39 -5
- package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/llama-kv-cache.h +410 -108
- package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/llama-memory.h +12 -1
- package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/llama-model-saver.h +37 -0
- package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/llama-model.h +19 -3
- package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/llama-vocab.h +6 -0
- package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/llama.h +86 -142
- package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/minja/chat-template.hpp +9 -5
- package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/minja/minja.hpp +69 -36
- package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/rn-llama.h +34 -8
- package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Info.plist +0 -0
- package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/_CodeSignature/CodeResources +1 -1
- package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/ggml-llama-sim.metallib +0 -0
- package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/rnllama +0 -0
- package/jest/mock.js +33 -7
- package/lib/commonjs/NativeRNLlama.js.map +1 -1
- package/lib/commonjs/index.js +153 -21
- package/lib/commonjs/index.js.map +1 -1
- package/lib/module/NativeRNLlama.js.map +1 -1
- package/lib/module/index.js +152 -20
- package/lib/module/index.js.map +1 -1
- package/lib/typescript/NativeRNLlama.d.ts +54 -4
- package/lib/typescript/NativeRNLlama.d.ts.map +1 -1
- package/lib/typescript/index.d.ts +72 -6
- package/lib/typescript/index.d.ts.map +1 -1
- package/package.json +1 -1
- package/src/NativeRNLlama.ts +72 -4
- package/src/index.ts +212 -38
- package/cpp/binary-ops.h +0 -16
- package/cpp/ops.h +0 -128
- package/cpp/simd-mappings.h +0 -888
- package/cpp/unary-ops.h +0 -28
- package/cpp/vec.h +0 -802
- package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/binary-ops.h +0 -16
- package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/ggml-cpu-aarch64.h +0 -8
- package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/ggml-cpu-impl.h +0 -512
- package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/ggml-cpu-quants.h +0 -63
- package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/ggml-cpu-traits.h +0 -38
- package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/ops.h +0 -128
- package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/sgemm.h +0 -14
- package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/simd-mappings.h +0 -888
- package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/vec.h +0 -802
- package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/ggml-cpu-aarch64.h +0 -8
- package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/ggml-cpu-impl.h +0 -512
- package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/ggml-cpu-quants.h +0 -63
- package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/ggml-cpu-traits.h +0 -38
- package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/sgemm.h +0 -14
- package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/unary-ops.h +0 -28
- package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/vec.h +0 -802
- package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/binary-ops.h +0 -16
- package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/ggml-cpu-aarch64.h +0 -8
- package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/ggml-cpu-impl.h +0 -512
- package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/ggml-cpu-quants.h +0 -63
- package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/ggml-cpu-traits.h +0 -38
- package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/ops.h +0 -128
- package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/sgemm.h +0 -14
- package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/simd-mappings.h +0 -888
- package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/unary-ops.h +0 -28
- package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/binary-ops.h +0 -16
- package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/ggml-cpu-aarch64.h +0 -8
- package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/ggml-cpu-impl.h +0 -512
- package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/ggml-cpu-quants.h +0 -63
- package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/ggml-cpu-traits.h +0 -38
- package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/ops.h +0 -128
- package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/sgemm.h +0 -14
- package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/simd-mappings.h +0 -888
- package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/unary-ops.h +0 -28
- package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/vec.h +0 -802
- package/lib/commonjs/chat.js +0 -37
- package/lib/commonjs/chat.js.map +0 -1
- package/lib/module/chat.js +0 -33
- package/lib/module/chat.js.map +0 -1
- package/lib/typescript/chat.d.ts +0 -10
- package/lib/typescript/chat.d.ts.map +0 -1
- package/src/chat.ts +0 -44
- /package/cpp/{binary-ops.cpp → ggml-cpu/binary-ops.cpp} +0 -0
- /package/cpp/{ggml-cpu-aarch64.h → ggml-cpu/ggml-cpu-aarch64.h} +0 -0
- /package/cpp/{ggml-cpu-impl.h → ggml-cpu/ggml-cpu-impl.h} +0 -0
- /package/cpp/{ggml-cpu-quants.h → ggml-cpu/ggml-cpu-quants.h} +0 -0
- /package/cpp/{ggml-cpu-traits.cpp → ggml-cpu/ggml-cpu-traits.cpp} +0 -0
- /package/cpp/{ggml-cpu-traits.h → ggml-cpu/ggml-cpu-traits.h} +0 -0
- /package/cpp/{sgemm.h → ggml-cpu/sgemm.h} +0 -0
- /package/cpp/{unary-ops.cpp → ggml-cpu/unary-ops.cpp} +0 -0
package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/rn-llama.h CHANGED
```diff
@@ -16,7 +16,6 @@
 
 namespace rnllama {
 
-
 std::string tokens_to_output_formatted_string(const llama_context *ctx, const llama_token token);
 
 std::string tokens_to_str(llama_context *ctx, const std::vector<llama_token>::const_iterator begin, const std::vector<llama_token>::const_iterator end);
@@ -42,6 +41,16 @@ struct completion_token_output
     llama_token tok;
 };
 
+struct llama_rn_context_mtmd;
+
+struct llama_rn_tokenize_result {
+    std::vector<llama_token> tokens;
+    bool has_media = false;
+    std::vector<std::string> bitmap_hashes;
+    std::vector<size_t> chunk_pos; // both text and media
+    std::vector<size_t> chunk_pos_media; // media only
+};
+
 // Main context class
 struct llama_rn_context {
     bool is_predicting = false;
@@ -52,8 +61,9 @@ struct llama_rn_context {
 
     size_t num_prompt_tokens = 0;
     size_t num_tokens_predicted = 0;
-
+    llama_pos n_past = 0;
     size_t n_remain = 0;
+    std::vector<std::string> mtmd_bitmap_past_hashes;
 
     std::vector<llama_token> embd;
     common_params params;
@@ -69,6 +79,7 @@ struct llama_rn_context {
 
     int n_ctx;
 
+    bool context_full = false;
     bool truncated = false;
     bool stopped_eos = false;
     bool stopped_word = false;
@@ -78,6 +89,9 @@ struct llama_rn_context {
 
     std::vector<common_adapter_lora_info> lora;
 
+    llama_rn_context_mtmd *mtmd_wrapper = nullptr;
+    bool has_multimodal = false;
+
     ~llama_rn_context();
 
     void rewind();
@@ -97,8 +111,9 @@ struct llama_rn_context {
         const std::string &chat_template
     ) const;
     void truncatePrompt(std::vector<llama_token> &prompt_tokens);
-    void loadPrompt();
+    void loadPrompt(const std::vector<std::string> &media_paths);
     void beginCompletion();
+    void endCompletion();
     completion_token_output nextToken();
     size_t findStoppingStrings(const std::string &text, const size_t last_token_size, const stop_type type);
     completion_token_output doCompletion();
@@ -107,11 +122,22 @@ struct llama_rn_context {
     int applyLoraAdapters(std::vector<common_adapter_lora_info> lora);
     void removeLoraAdapters();
     std::vector<common_adapter_lora_info> getLoadedLoraAdapters();
-
-
-
-
-
+
+    // Multimodal methods
+    bool initMultimodal(const std::string &mmproj_path, bool use_gpu);
+    bool isMultimodalEnabled() const;
+    bool isMultimodalSupportVision() const;
+    bool isMultimodalSupportAudio() const;
+    void releaseMultimodal();
+
+    // Process multiple media and add them to the context
+    void processMedia(
+        const std::string &prompt,
+        const std::vector<std::string> &media_paths
+    );
+
+    llama_rn_tokenize_result tokenize(const std::string &text, const std::vector<std::string> &media_paths);
+};
 
 // Logging macros
 extern bool rnllama_verbose;
```
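The new members above define the 1.7.0 multimodal workflow: initialize a projector, tokenize text together with media paths, then run a completion. A minimal sketch of that call sequence, inferred only from these declarations; the file paths and the completion loop are placeholders, not package documentation:

```cpp
#include "rn-llama.h"

// Hypothetical usage of the new multimodal API; "mmproj.gguf" and
// "photo.jpg" are illustrative paths.
void describe_image(rnllama::llama_rn_context &ctx) {
    if (!ctx.initMultimodal("mmproj.gguf", /* use_gpu */ true)) return;
    if (ctx.isMultimodalSupportVision()) {
        const std::vector<std::string> media = {"photo.jpg"};
        // chunk_pos marks all chunk starts, chunk_pos_media only media chunks
        auto res = ctx.tokenize("Describe this image", media);
        (void) res;
        ctx.loadPrompt(media);
        ctx.beginCompletion();
        // ... call ctx.doCompletion() until a stop condition ...
        ctx.endCompletion(); // new in 1.7.0, pairs with beginCompletion()
    }
    ctx.releaseMultimodal();
}
```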
package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/chat.h CHANGED
```diff
@@ -3,6 +3,7 @@
 #pragma once
 
 #include "common.h"
+#include <chrono>
 #include <string>
 #include <vector>
 #include "minja/chat-template.hpp"
@@ -79,6 +80,7 @@ struct common_chat_templates_inputs {
     common_chat_tool_choice tool_choice = COMMON_CHAT_TOOL_CHOICE_AUTO;
     bool parallel_tool_calls = false;
     bool extract_reasoning = true;
+    std::chrono::system_clock::time_point now = std::chrono::system_clock::now();
 };
 
 struct common_chat_params {
```
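The added `now` field threads a caller-supplied clock into chat-template rendering, defaulting to the current time. A small sketch of why you might set it, assuming a date-aware template; the function name is illustrative:

```cpp
#include "chat.h"
#include <chrono>

// Sketch: pin the template clock so templates that print today's date
// render reproducibly instead of calling system_clock::now().
common_chat_templates_inputs make_reproducible_inputs() {
    common_chat_templates_inputs inputs;
    inputs.now = std::chrono::system_clock::time_point{}; // fixed instant (epoch)
    return inputs;
}
```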
package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/common.h CHANGED
```diff
@@ -6,6 +6,7 @@
 
 #include <set>
 #include <string>
+#include <string_view>
 #include <vector>
 #include <sstream>
 
@@ -77,7 +78,6 @@ enum llama_example {
     LLAMA_EXAMPLE_COMMON,
     LLAMA_EXAMPLE_SPECULATIVE,
     LLAMA_EXAMPLE_MAIN,
-    LLAMA_EXAMPLE_INFILL,
     LLAMA_EXAMPLE_EMBEDDING,
     LLAMA_EXAMPLE_PERPLEXITY,
     LLAMA_EXAMPLE_RETRIEVAL,
@@ -87,7 +87,7 @@ enum llama_example {
     LLAMA_EXAMPLE_SERVER,
     LLAMA_EXAMPLE_CVECTOR_GENERATOR,
     LLAMA_EXAMPLE_EXPORT_LORA,
-
+    LLAMA_EXAMPLE_MTMD,
     LLAMA_EXAMPLE_LOOKUP,
     LLAMA_EXAMPLE_PARALLEL,
     LLAMA_EXAMPLE_TTS,
@@ -107,6 +107,7 @@ enum common_sampler_type {
     COMMON_SAMPLER_TYPE_XTC = 8,
     COMMON_SAMPLER_TYPE_INFILL = 9,
     COMMON_SAMPLER_TYPE_PENALTIES = 10,
+    COMMON_SAMPLER_TYPE_TOP_N_SIGMA = 11,
 };
 
 // dimensionality reduction methods, used by cvector-generator
@@ -172,6 +173,7 @@ struct common_params_sampling {
     std::vector<enum common_sampler_type> samplers = {
         COMMON_SAMPLER_TYPE_PENALTIES,
         COMMON_SAMPLER_TYPE_DRY,
+        COMMON_SAMPLER_TYPE_TOP_N_SIGMA,
         COMMON_SAMPLER_TYPE_TOP_K,
         COMMON_SAMPLER_TYPE_TYPICAL_P,
         COMMON_SAMPLER_TYPE_TOP_P,
@@ -336,17 +338,17 @@ struct common_params {
     bool flash_attn = false; // flash attention
     bool no_perf = false; // disable performance metrics
     bool ctx_shift = true; // context shift on inifinite text generation
+    bool swa_full = false; // use full-size SWA cache (https://github.com/ggml-org/llama.cpp/pull/13194#issuecomment-2868343055)
 
     bool input_prefix_bos = false; // prefix BOS to user inputs, preceding input_prefix
-    bool logits_all = false; // return logits for all tokens in the batch
     bool use_mmap = true; // use mmap for faster loads
     bool use_mlock = false; // use mlock to keep model in memory
     bool verbose_prompt = false; // print prompt tokens before generation
     bool display_prompt = true; // print prompt before generation
-    bool dump_kv_cache = false; // dump the KV cache contents for debugging purposes
     bool no_kv_offload = false; // disable KV offloading
     bool warmup = true; // warmup run
     bool check_tensors = false; // validate tensor data
+    bool no_op_offload = false; // globally disable offload host tensor operations to device
 
     bool single_turn = false; // single turn chat conversation
 
@@ -355,8 +357,10 @@ struct common_params {
 
     common_conversation_mode conversation_mode = COMMON_CONVERSATION_MODE_AUTO;
 
-    // multimodal models (see
+    // multimodal models (see tools/mtmd)
     struct common_params_model mmproj;
+    bool mmproj_use_gpu = true; // use GPU for multimodal model
+    bool no_mmproj = false; // explicitly disable multimodal model
     std::vector<std::string> image; // path to image file(s)
 
     // embedding
@@ -379,6 +383,7 @@ struct common_params {
     bool use_jinja = false; // NOLINT
     bool enable_chat_template = true;
     common_reasoning_format reasoning_format = COMMON_REASONING_FORMAT_DEEPSEEK;
+    bool prefill_assistant = true; // if true, any trailing assistant message will be prefilled into the response
 
     std::vector<std::string> api_keys;
 
@@ -422,13 +427,14 @@ struct common_params {
 
     bool process_output = false; // collect data for the output tensor
     bool compute_ppl = true; // whether to compute perplexity
+    bool parse_special = false; // whether to parse special tokens during imatrix tokenization
 
     // cvector-generator params
     int n_pca_batch = 100;
     int n_pca_iterations = 1000;
     dimre_method cvector_dimre_method = DIMRE_METHOD_PCA;
-    std::string cvector_positive_file = "
-    std::string cvector_negative_file = "
+    std::string cvector_positive_file = "tools/cvector-generator/positive.txt";
+    std::string cvector_negative_file = "tools/cvector-generator/negative.txt";
 
     bool spm_infill = false; // suffix/prefix/middle pattern for infill
 
@@ -437,6 +443,11 @@ struct common_params {
 
     // common params
     std::string out_file; // output filename for all example programs
+    // optional callback for model loading progress and cancellation:
+    // called with a progress value between 0.0 and 1.0.
+    // return false from callback to abort model loading or true to continue
+    llama_progress_callback load_progress_callback = NULL;
+    void * load_progress_callback_user_data = NULL;
 };
 
 // call once at the start of a program if it uses libcommon
@@ -514,10 +525,9 @@ static bool string_starts_with(const std::string & str,
     return str.rfind(prefix, 0) == 0;
 }
 
-
-
-
-}
+// While we wait for C++20's std::string::ends_with...
+bool string_ends_with(const std::string_view & str, const std::string_view & suffix);
+size_t string_find_partial_stop(const std::string_view & str, const std::string_view & stop);
 
 bool string_parse_kv_override(const char * data, std::vector<llama_model_kv_override> & overrides);
 void string_process_escapes(std::string & input);
@@ -558,6 +568,8 @@ struct lm_ggml_threadpool_params lm_ggml_threadpool_params_from_cpu_params(const
 // clear LoRA adapters from context, then apply new list of adapters
 void common_set_adapter_lora(struct llama_context * ctx, std::vector<common_adapter_lora_info> & lora);
 
+std::string get_model_endpoint();
+
 //
 // Batch utils
 //
@@ -624,16 +636,6 @@ std::string common_detokenize(
     const std::vector<llama_token> & tokens,
     bool special = true);
 
-//
-// KV cache utils
-//
-
-// Dump the KV cache view with the number of sequences per cell.
-void common_kv_cache_dump_view(const llama_kv_cache_view & view, int row_size = 80);
-
-// Dump the KV cache view showing individual sequences in each cell (long output).
-void common_kv_cache_dump_view_seqs(const llama_kv_cache_view & view, int row_size = 40);
-
 //
 // Embedding utils
 //
@@ -675,3 +677,9 @@ const char * const LLM_KV_SPLIT_COUNT = "split.count";
 const char * const LLM_KV_SPLIT_TENSORS_COUNT = "split.tensors.count";
 
 }
+
+//
+// training utils
+//
+
+lm_ggml_opt_dataset_t common_opt_dataset_init(struct llama_context * ctx, const std::vector<llama_token> & tokens, int64_t stride);
```
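The new `load_progress_callback` pair makes model loading observable and cancellable. A sketch of wiring it up; the callback shape (`bool(float, void *)`) is `llama_progress_callback` from llama.h, and the cancellation flag here is illustrative:

```cpp
#include "common.h"
#include <cstdio>

// Illustrative progress hook: print percent, abort when *user_data says so.
static bool on_load_progress(float progress, void * user_data) {
    bool * cancelled = static_cast<bool *>(user_data);
    std::fprintf(stderr, "loading model: %3.0f%%\r", progress * 100.0f);
    return !(*cancelled); // returning false aborts the load
}

void attach_progress(common_params & params, bool * cancelled) {
    params.load_progress_callback           = on_load_progress;
    params.load_progress_callback_user_data = cancelled;
}
```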
package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/ggml-backend.h CHANGED
```diff
@@ -38,7 +38,7 @@ extern "C" {
     LM_GGML_API lm_ggml_backend_buffer_t lm_ggml_backend_buft_alloc_buffer (lm_ggml_backend_buffer_type_t buft, size_t size);
     LM_GGML_API size_t lm_ggml_backend_buft_get_alignment (lm_ggml_backend_buffer_type_t buft);
     LM_GGML_API size_t lm_ggml_backend_buft_get_max_size (lm_ggml_backend_buffer_type_t buft);
-    LM_GGML_API size_t lm_ggml_backend_buft_get_alloc_size(lm_ggml_backend_buffer_type_t buft, struct lm_ggml_tensor * tensor);
+    LM_GGML_API size_t lm_ggml_backend_buft_get_alloc_size(lm_ggml_backend_buffer_type_t buft, const struct lm_ggml_tensor * tensor);
     LM_GGML_API bool lm_ggml_backend_buft_is_host (lm_ggml_backend_buffer_type_t buft);
     LM_GGML_API lm_ggml_backend_dev_t lm_ggml_backend_buft_get_device (lm_ggml_backend_buffer_type_t buft);
 
@@ -59,7 +59,7 @@ extern "C" {
     LM_GGML_API enum lm_ggml_status lm_ggml_backend_buffer_init_tensor (lm_ggml_backend_buffer_t buffer, struct lm_ggml_tensor * tensor);
     LM_GGML_API size_t lm_ggml_backend_buffer_get_alignment (lm_ggml_backend_buffer_t buffer);
     LM_GGML_API size_t lm_ggml_backend_buffer_get_max_size (lm_ggml_backend_buffer_t buffer);
-    LM_GGML_API size_t lm_ggml_backend_buffer_get_alloc_size(lm_ggml_backend_buffer_t buffer, struct lm_ggml_tensor * tensor);
+    LM_GGML_API size_t lm_ggml_backend_buffer_get_alloc_size(lm_ggml_backend_buffer_t buffer, const struct lm_ggml_tensor * tensor);
     LM_GGML_API void lm_ggml_backend_buffer_clear (lm_ggml_backend_buffer_t buffer, uint8_t value);
     LM_GGML_API bool lm_ggml_backend_buffer_is_host (lm_ggml_backend_buffer_t buffer);
     LM_GGML_API void lm_ggml_backend_buffer_set_usage (lm_ggml_backend_buffer_t buffer, enum lm_ggml_backend_buffer_usage usage);
@@ -248,7 +248,7 @@ extern "C" {
     // preferrably to run on the same backend as the buffer
     lm_ggml_backend_buffer_set_usage(buf_weights, LM_GGML_BACKEND_BUFFER_USAGE_WEIGHTS);
 
-    sched = lm_ggml_backend_sched_new({backend_gpu, backend_gpu2, backend_cpu}, NULL, num_backends, LM_GGML_DEFAULT_GRAPH_SIZE, false);
+    sched = lm_ggml_backend_sched_new({backend_gpu, backend_gpu2, backend_cpu}, NULL, num_backends, LM_GGML_DEFAULT_GRAPH_SIZE, false, true);
 
     // initialize buffers from a max size graph (optional)
     reserve_graph = build_graph(sched, max_batch_size);
@@ -289,7 +289,7 @@ extern "C" {
     typedef bool (*lm_ggml_backend_sched_eval_callback)(struct lm_ggml_tensor * t, bool ask, void * user_data);
 
     // Initialize a backend scheduler, backends with low index are given priority over backends with high index
-    LM_GGML_API lm_ggml_backend_sched_t lm_ggml_backend_sched_new(lm_ggml_backend_t * backends, lm_ggml_backend_buffer_type_t * bufts, int n_backends, size_t graph_size, bool parallel);
+    LM_GGML_API lm_ggml_backend_sched_t lm_ggml_backend_sched_new(lm_ggml_backend_t * backends, lm_ggml_backend_buffer_type_t * bufts, int n_backends, size_t graph_size, bool parallel, bool op_offload);
     LM_GGML_API void lm_ggml_backend_sched_free(lm_ggml_backend_sched_t sched);
 
     // Initialize backend buffers from a measure graph
```
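`lm_ggml_backend_sched_new` gained a trailing `op_offload` flag; this is what the new `common_params.no_op_offload` feeds, inverted. A migration sketch for existing callers (the wrapper function is illustrative):

```cpp
#include "ggml-backend.h"

// Passing op_offload = true preserves the 1.6.x behavior (host tensor ops
// may be offloaded to a device); false matches no_op_offload = true.
lm_ggml_backend_sched_t make_sched(lm_ggml_backend_t * backends, int n_backends) {
    return lm_ggml_backend_sched_new(backends, /* bufts */ NULL, n_backends,
                                     LM_GGML_DEFAULT_GRAPH_SIZE,
                                     /* parallel */ false,
                                     /* op_offload */ true);
}
```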
package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/ggml-cpp.h CHANGED
```diff
@@ -24,7 +24,7 @@ typedef std::unique_ptr<lm_gguf_context, lm_gguf_context_deleter> lm_gguf_contex
 
 struct lm_ggml_gallocr_deleter { void operator()(lm_ggml_gallocr_t galloc) { lm_ggml_gallocr_free(galloc); } };
 
-typedef std::unique_ptr<
+typedef std::unique_ptr<lm_ggml_gallocr, lm_ggml_gallocr_deleter> lm_ggml_gallocr_ptr;
 
 // ggml-backend
 
```
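The completed `lm_ggml_gallocr_ptr` typedef gives RAII ownership of a graph allocator. A tiny sketch, assuming `lm_ggml_gallocr_new` from ggml-alloc.h (pulled in by ggml-cpp.h):

```cpp
#include "ggml-cpp.h"

void with_gallocr(lm_ggml_backend_buffer_type_t buft) {
    lm_ggml_gallocr_ptr galloc { lm_ggml_gallocr_new(buft) };
    // ... reserve/allocate graphs via galloc.get() ...
} // lm_ggml_gallocr_free runs automatically on scope exit
```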
package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/ggml-cpu.h CHANGED
```diff
@@ -133,6 +133,11 @@ extern "C" {
 
     LM_GGML_BACKEND_API lm_ggml_backend_reg_t lm_ggml_backend_cpu_reg(void);
 
+    LM_GGML_BACKEND_API void lm_ggml_cpu_fp32_to_fp16(const float *, lm_ggml_fp16_t *, int64_t);
+    LM_GGML_BACKEND_API void lm_ggml_cpu_fp16_to_fp32(const lm_ggml_fp16_t *, float *, int64_t);
+    LM_GGML_BACKEND_API void lm_ggml_cpu_fp32_to_bf16(const float *, lm_ggml_bf16_t *, int64_t);
+    LM_GGML_BACKEND_API void lm_ggml_cpu_bf16_to_fp32(const lm_ggml_bf16_t *, float *, int64_t);
+
 #ifdef __cplusplus
 }
 #endif
```
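These four helpers export the CPU backend's bulk fp16/bf16 converters. A short sketch of a (deliberately lossy) round trip; the function is illustrative:

```cpp
#include "ggml-cpu.h"
#include <vector>

// Convert a float buffer to fp16 and back, e.g. when preparing tensor data.
void fp16_roundtrip(const std::vector<float> & src) {
    std::vector<lm_ggml_fp16_t> half(src.size());
    std::vector<float> back(src.size());
    lm_ggml_cpu_fp32_to_fp16(src.data(), half.data(), (int64_t) src.size());
    lm_ggml_cpu_fp16_to_fp32(half.data(), back.data(), (int64_t) back.size());
    // back[i] now holds src[i] rounded through half precision
}
```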
package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/ggml-impl.h CHANGED
```diff
@@ -16,6 +16,14 @@
 #include <arm_sve.h>
 #endif // __ARM_FEATURE_SVE
 
+#if defined(__ARM_NEON) && !defined(__CUDACC__) && !defined(__MUSACC__)
+// if YCM cannot find <arm_neon.h>, make a symbolic link to it, for example:
+//
+// $ ln -sfn /Library/Developer/CommandLineTools/usr/lib/clang/13.1.6/include/arm_neon.h ./src/
+//
+#include <arm_neon.h>
+#endif
+
 #if defined(__F16C__)
 #include <immintrin.h>
 #endif
@@ -140,8 +148,14 @@ struct lm_ggml_map_custom2_op_params {
 
 struct lm_ggml_map_custom3_op_params {
     lm_ggml_custom3_op_t fun;
-    int
-    void
+    int n_tasks;
+    void * userdata;
+};
+
+struct lm_ggml_custom_op_params {
+    lm_ggml_custom_op_t fun;
+    int n_tasks;
+    void * userdata;
 };
 
 // bitset
@@ -311,13 +325,6 @@ LM_GGML_API void lm_ggml_aligned_free(void * ptr, size_t size);
 // for MUSA compilers , we use uint16_t: ref https://github.com/ggml-org/llama.cpp/pull/11843
 //
 #if defined(__ARM_NEON) && !(defined(__CUDACC__) && __CUDACC_VER_MAJOR__ <= 11) && !defined(__MUSACC__)
-
-// if YCM cannot find <arm_neon.h>, make a symbolic link to it, for example:
-//
-// $ ln -sfn /Library/Developer/CommandLineTools/usr/lib/clang/13.1.6/include/arm_neon.h ./src/
-//
-#include <arm_neon.h>
-
 #define LM_GGML_COMPUTE_FP16_TO_FP32(x) lm_ggml_compute_fp16_to_fp32(x)
 #define LM_GGML_COMPUTE_FP32_TO_FP16(x) lm_ggml_compute_fp32_to_fp16(x)
 
```
package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/ggml-metal-impl.h CHANGED
```diff
@@ -207,6 +207,10 @@ typedef struct {
     float attn_factor;
     float beta_fast;
     float beta_slow;
+    int32_t sect_0;
+    int32_t sect_1;
+    int32_t sect_2;
+    int32_t sect_3;
 } lm_ggml_metal_kargs_rope;
 
 typedef struct {
@@ -299,21 +303,42 @@ typedef struct {
 } lm_ggml_metal_kargs_mul_mv_ext;
 
 typedef struct {
-    int32_t
-    int32_t
-    uint64_t
+    int32_t ne10;
+    int32_t ne11; // n_expert_used (bcast)
+    uint64_t nb11;
+    uint64_t nb12;
+    int32_t neh11; // n_tokens
+    uint64_t nbh11;
+    int32_t ne20; // n_expert_used
+    uint64_t nb21;
+} lm_ggml_metal_kargs_mul_mm_id_map0;
+
+typedef struct {
+    int32_t ne20; // n_expert_used
+    int32_t neh0;
+    int32_t neh1;
+    uint64_t nbh1;
+    uint64_t nbh2;
+    int32_t ne0;
+    uint64_t nb1;
+    uint64_t nb2;
+} lm_ggml_metal_kargs_mul_mm_id_map1;
+
+typedef struct {
     int32_t ne00;
     int32_t ne02;
     uint64_t nb01;
     uint64_t nb02;
-
-    int32_t
-
-    uint64_t
-    uint64_t
-    uint64_t
-    int32_t
-    int32_t
+    uint64_t nb03;
+    int32_t neh12;
+    uint64_t nbh10;
+    uint64_t nbh11;
+    uint64_t nbh12;
+    uint64_t nbh13;
+    int32_t neh0;
+    int32_t neh1;
+    int16_t r2;
+    int16_t r3;
 } lm_ggml_metal_kargs_mul_mm_id;
 
 typedef struct {
```
package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/ggml-opt.h CHANGED
```diff
@@ -37,13 +37,16 @@ extern "C" {
     // ====== Dataset ======
 
     LM_GGML_API lm_ggml_opt_dataset_t lm_ggml_opt_dataset_init(
-
-
-        int64_t
-        int64_t
+        enum lm_ggml_type type_data,  // the type for the internal data tensor
+        enum lm_ggml_type type_label, // the type for the internal labels tensor
+        int64_t ne_datapoint,         // number of elements per datapoint
+        int64_t ne_label,             // number of elements per label
+        int64_t ndata,                // total number of datapoints/labels
+        int64_t ndata_shard);         // number of datapoints/labels per shard (unit at which the dataset is shuffled/copied)
     LM_GGML_API void lm_ggml_opt_dataset_free(lm_ggml_opt_dataset_t dataset);
 
     // get underlying tensors that store the data
+    LM_GGML_API int64_t lm_ggml_opt_dataset_ndata (lm_ggml_opt_dataset_t dataset);
     LM_GGML_API struct lm_ggml_tensor * lm_ggml_opt_dataset_data  (lm_ggml_opt_dataset_t dataset); // shape = [ne_datapoint, ndata]
     LM_GGML_API struct lm_ggml_tensor * lm_ggml_opt_dataset_labels(lm_ggml_opt_dataset_t dataset); // shape = [nd_label, ndata]
 
@@ -56,13 +59,19 @@
         struct lm_ggml_tensor * data_batch,   // shape = [ne_datapoint, ndata_batch]
         struct lm_ggml_tensor * labels_batch, // shape = [ne_label, ndata_batch]
         int64_t ibatch);
+    LM_GGML_API void lm_ggml_opt_dataset_get_batch_host(
+        lm_ggml_opt_dataset_t dataset,
+        void * data_batch,
+        size_t nb_data_batch,
+        void * labels_batch,
+        int64_t ibatch);
 
     // ====== Model / Context ======
 
     enum lm_ggml_opt_build_type {
-        LM_GGML_OPT_BUILD_TYPE_FORWARD,
-        LM_GGML_OPT_BUILD_TYPE_GRAD,
-        LM_GGML_OPT_BUILD_TYPE_OPT,
+        LM_GGML_OPT_BUILD_TYPE_FORWARD = 10,
+        LM_GGML_OPT_BUILD_TYPE_GRAD    = 20,
+        LM_GGML_OPT_BUILD_TYPE_OPT     = 30,
     };
 
     // parameters that control which optimizer is used and how said optimizer tries to find the minimal loss
@@ -81,20 +90,22 @@
     // userdata can be used to pass arbitrary data
     typedef struct lm_ggml_opt_optimizer_params (*lm_ggml_opt_get_optimizer_params)(void * userdata);
 
-    // returns the default optimizer params (constant)
+    // returns the default optimizer params (constant, hard-coded values)
    // userdata is not used
     LM_GGML_API struct lm_ggml_opt_optimizer_params lm_ggml_opt_get_default_optimizer_params(void * userdata);
 
+    // casts userdata to lm_ggml_opt_optimizer_params and returns it
+    LM_GGML_API struct lm_ggml_opt_optimizer_params lm_ggml_opt_get_constant_optimizer_params(void * userdata);
+
     // parameters for initializing a new optimization context
     struct lm_ggml_opt_params {
         lm_ggml_backend_sched_t backend_sched; // defines which backends are used to construct the compute graphs
 
-
-
-
-
-        struct lm_ggml_tensor
-        struct lm_ggml_tensor * outputs;
+        // by default the forward graph needs to be reconstructed for each eval
+        // if ctx_compute, inputs, and outputs are set the graphs are instead allocated statically
+        struct lm_ggml_context * ctx_compute;
+        struct lm_ggml_tensor  * inputs;
+        struct lm_ggml_tensor  * outputs;
 
         enum lm_ggml_opt_loss_type loss_type;
         enum lm_ggml_opt_build_type build_type;
@@ -107,12 +118,9 @@
 
     // get parameters for an optimization context with defaults set where possible
     // parameters for which no sensible defaults exist are supplied as arguments to this function
-    LM_GGML_API lm_ggml_opt_params lm_ggml_opt_default_params(
-        lm_ggml_backend_sched_t
-
-        struct lm_ggml_tensor * inputs,
-        struct lm_ggml_tensor * outputs,
-        enum lm_ggml_opt_loss_type loss_type);
+    LM_GGML_API struct lm_ggml_opt_params lm_ggml_opt_default_params(
+        lm_ggml_backend_sched_t backend_sched,
+        enum lm_ggml_opt_loss_type loss_type);
 
     LM_GGML_API lm_ggml_opt_context_t lm_ggml_opt_init(struct lm_ggml_opt_params params);
     LM_GGML_API void lm_ggml_opt_free(lm_ggml_opt_context_t opt_ctx);
@@ -120,7 +128,10 @@
     // set gradients to zero, initilize loss, and optionally reset the optimizer
     LM_GGML_API void lm_ggml_opt_reset(lm_ggml_opt_context_t opt_ctx, bool optimizer);
 
+    LM_GGML_API bool lm_ggml_opt_static_graphs(lm_ggml_opt_context_t opt_ctx); // whether the graphs are allocated_statically
+
     // get underlying tensors that store data
+    // if not using static graphs these pointers become invalid with the next call to lm_ggml_opt_alloc
     LM_GGML_API struct lm_ggml_tensor * lm_ggml_opt_inputs(  lm_ggml_opt_context_t opt_ctx); // forward graph input tensor
     LM_GGML_API struct lm_ggml_tensor * lm_ggml_opt_outputs( lm_ggml_opt_context_t opt_ctx); // forward graph output tensor
     LM_GGML_API struct lm_ggml_tensor * lm_ggml_opt_labels(  lm_ggml_opt_context_t opt_ctx); // labels to compare outputs against
@@ -128,11 +139,12 @@
     LM_GGML_API struct lm_ggml_tensor * lm_ggml_opt_pred(    lm_ggml_opt_context_t opt_ctx); // predictions made by outputs
     LM_GGML_API struct lm_ggml_tensor * lm_ggml_opt_ncorrect(lm_ggml_opt_context_t opt_ctx); // number of matching predictions between outputs and labels
 
+    // get the gradient accumulator for a node from the forward graph
     LM_GGML_API struct lm_ggml_tensor * lm_ggml_opt_grad_acc(lm_ggml_opt_context_t opt_ctx, struct lm_ggml_tensor * node);
 
     // ====== Optimization Result ======
 
-    LM_GGML_API lm_ggml_opt_result_t lm_ggml_opt_result_init();
+    LM_GGML_API lm_ggml_opt_result_t lm_ggml_opt_result_init(void);
     LM_GGML_API void lm_ggml_opt_result_free(lm_ggml_opt_result_t result);
     LM_GGML_API void lm_ggml_opt_result_reset(lm_ggml_opt_result_t result);
 
@@ -144,11 +156,20 @@
 
     // ====== Computation ======
 
-    //
-    LM_GGML_API void
+    // if not using static graphs, this function must be called prior to lm_ggml_opt_alloc
+    LM_GGML_API void lm_ggml_opt_prepare_alloc(
+        lm_ggml_opt_context_t    opt_ctx,
+        struct lm_ggml_context * ctx_compute,
+        struct lm_ggml_cgraph  * gf,
+        struct lm_ggml_tensor  * inputs,
+        struct lm_ggml_tensor  * outputs);
+
+    // allocate the next graph for evaluation, either forward or forward + backward
+    // must be called exactly once prior to calling lm_ggml_opt_eval
+    LM_GGML_API void lm_ggml_opt_alloc(lm_ggml_opt_context_t opt_ctx, bool backward);
 
-    // do forward pass, increment result if not NULL, do backward pass
-    LM_GGML_API void
+    // do forward pass, increment result if not NULL, do backward pass if allocated
+    LM_GGML_API void lm_ggml_opt_eval(lm_ggml_opt_context_t opt_ctx, lm_ggml_opt_result_t result);
 
     // ############################################################################
     // ## The high-level functions start here. They do not depend on any private ##
@@ -200,9 +221,9 @@
     // fit model defined by inputs and outputs to dataset
     LM_GGML_API void lm_ggml_opt_fit(
         lm_ggml_backend_sched_t backend_sched,  // backend scheduler for constructing the compute graphs
-        lm_ggml_context
-        lm_ggml_tensor
-        lm_ggml_tensor
+        struct lm_ggml_context * ctx_compute,   // context with temporarily allocated tensors to calculate the outputs
+        struct lm_ggml_tensor * inputs,         // input tensor with shape [ne_datapoint, ndata_batch]
+        struct lm_ggml_tensor * outputs,        // output tensor, must have shape [ne_label, ndata_batch] if labels are used
         lm_ggml_opt_dataset_t dataset,          // dataset with data and optionally also labels
         enum lm_ggml_opt_loss_type loss_type,   // loss to minimize
         lm_ggml_opt_get_optimizer_params get_opt_pars, // callback to get optimizer params, userdata is pointer to epoch (of type int64_t)
```