cui-llama.rn 1.7.3 → 1.7.6
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +217 -17
- package/android/src/main/CMakeLists.txt +34 -15
- package/android/src/main/java/com/rnllama/LlamaContext.java +94 -8
- package/android/src/main/java/com/rnllama/RNLlama.java +247 -0
- package/android/src/main/jni.cpp +213 -14
- package/android/src/main/jniLibs/arm64-v8a/librnllama.so +0 -0
- package/android/src/main/jniLibs/arm64-v8a/librnllama_v8.so +0 -0
- package/android/src/main/jniLibs/arm64-v8a/librnllama_v8_2.so +0 -0
- package/android/src/main/jniLibs/arm64-v8a/librnllama_v8_2_dotprod.so +0 -0
- package/android/src/main/jniLibs/arm64-v8a/librnllama_v8_2_dotprod_i8mm.so +0 -0
- package/android/src/main/jniLibs/arm64-v8a/librnllama_v8_2_i8mm.so +0 -0
- package/android/src/main/jniLibs/x86_64/librnllama.so +0 -0
- package/android/src/main/jniLibs/x86_64/librnllama_x86_64.so +0 -0
- package/android/src/newarch/java/com/rnllama/RNLlamaModule.java +35 -0
- package/android/src/oldarch/java/com/rnllama/RNLlamaModule.java +34 -0
- package/cpp/README.md +1 -1
- package/cpp/chat-parser.cpp +385 -0
- package/cpp/chat-parser.h +120 -0
- package/cpp/chat.cpp +726 -596
- package/cpp/chat.h +71 -6
- package/cpp/common.cpp +56 -38
- package/cpp/common.h +9 -3
- package/cpp/ggml-backend-reg.cpp +5 -0
- package/cpp/ggml-backend.cpp +10 -2
- package/cpp/ggml-common.h +4 -0
- package/cpp/ggml-cpu/amx/amx.cpp +1 -1
- package/cpp/ggml-cpu/amx/mmq.cpp +11 -10
- package/cpp/ggml-cpu/arch/arm/cpu-feats.cpp +94 -0
- package/cpp/ggml-cpu/arch/arm/quants.c +4114 -0
- package/cpp/ggml-cpu/arch/arm/repack.cpp +2163 -0
- package/cpp/ggml-cpu/arch/x86/cpu-feats.cpp +327 -0
- package/cpp/ggml-cpu/arch/x86/quants.c +4311 -0
- package/cpp/ggml-cpu/{ggml-cpu-aarch64.cpp → arch/x86/repack.cpp} +79 -3225
- package/cpp/ggml-cpu/arch-fallback.h +184 -0
- package/cpp/ggml-cpu/common.h +4 -3
- package/cpp/ggml-cpu/ggml-cpu-impl.h +21 -16
- package/cpp/ggml-cpu/ggml-cpu.c +123 -104
- package/cpp/ggml-cpu/ggml-cpu.cpp +11 -8
- package/cpp/ggml-cpu/ops.cpp +330 -148
- package/cpp/ggml-cpu/ops.h +1 -0
- package/cpp/ggml-cpu/quants.c +1158 -0
- package/cpp/ggml-cpu/{ggml-cpu-quants.h → quants.h} +26 -0
- package/cpp/ggml-cpu/repack.cpp +1571 -0
- package/cpp/ggml-cpu/repack.h +98 -0
- package/cpp/ggml-cpu/simd-mappings.h +330 -38
- package/cpp/ggml-cpu/{ggml-cpu-traits.cpp → traits.cpp} +1 -1
- package/cpp/ggml-cpu/vec.cpp +87 -18
- package/cpp/ggml-cpu/vec.h +249 -94
- package/cpp/ggml-cpu.h +1 -0
- package/cpp/ggml-impl.h +63 -183
- package/cpp/ggml-llama-sim.metallib +0 -0
- package/cpp/ggml-llama.metallib +0 -0
- package/cpp/ggml-metal.m +152 -45
- package/cpp/ggml-quants.c +0 -2
- package/cpp/ggml.c +61 -21
- package/cpp/ggml.h +22 -3
- package/cpp/gguf.cpp +24 -3
- package/cpp/json-partial.cpp +256 -0
- package/cpp/json-partial.h +38 -0
- package/cpp/json-schema-to-grammar.cpp +5 -47
- package/cpp/json-schema-to-grammar.h +4 -4
- package/cpp/llama-arch.cpp +153 -3
- package/cpp/llama-arch.h +27 -1
- package/cpp/llama-batch.cpp +741 -272
- package/cpp/llama-batch.h +112 -54
- package/cpp/llama-chat.cpp +30 -8
- package/cpp/llama-chat.h +1 -0
- package/cpp/llama-context.cpp +524 -339
- package/cpp/llama-context.h +38 -17
- package/cpp/llama-cparams.cpp +4 -0
- package/cpp/llama-cparams.h +2 -0
- package/cpp/llama-grammar.cpp +12 -2
- package/cpp/llama-graph.cpp +431 -356
- package/cpp/llama-graph.h +126 -58
- package/cpp/llama-hparams.cpp +10 -2
- package/cpp/llama-hparams.h +19 -2
- package/cpp/llama-kv-cache-unified-iswa.cpp +279 -0
- package/cpp/llama-kv-cache-unified-iswa.h +128 -0
- package/cpp/llama-kv-cache-unified.cpp +1841 -0
- package/cpp/llama-kv-cache-unified.h +303 -0
- package/cpp/llama-kv-cells.h +439 -0
- package/cpp/llama-memory-hybrid.cpp +246 -0
- package/cpp/llama-memory-hybrid.h +138 -0
- package/cpp/llama-memory-recurrent.cpp +1112 -0
- package/cpp/llama-memory-recurrent.h +183 -0
- package/cpp/llama-memory.cpp +41 -0
- package/cpp/llama-memory.h +86 -5
- package/cpp/llama-mmap.cpp +1 -1
- package/cpp/llama-model-loader.cpp +42 -17
- package/cpp/llama-model-saver.cpp +1 -0
- package/cpp/llama-model.cpp +1639 -513
- package/cpp/llama-model.h +26 -0
- package/cpp/llama-sampling.cpp +2 -2
- package/cpp/llama-vocab.cpp +65 -28
- package/cpp/llama-vocab.h +1 -0
- package/cpp/llama.cpp +11 -7
- package/cpp/llama.h +150 -42
- package/cpp/minja/chat-template.hpp +1 -1
- package/cpp/minja/minja.hpp +1 -1
- package/cpp/{json.hpp → nlohmann/json.hpp} +3027 -2267
- package/cpp/nlohmann/json_fwd.hpp +187 -0
- package/cpp/regex-partial.cpp +204 -0
- package/cpp/regex-partial.h +56 -0
- package/cpp/rn-llama.cpp +646 -35
- package/cpp/rn-llama.h +32 -1
- package/cpp/rn-tts.h +39 -0
- package/cpp/sampling.cpp +7 -8
- package/cpp/tools/mtmd/clip-impl.h +5 -0
- package/cpp/tools/mtmd/clip.cpp +572 -436
- package/cpp/tools/mtmd/clip.h +14 -4
- package/cpp/tools/mtmd/mtmd-audio.cpp +0 -86
- package/cpp/tools/mtmd/mtmd-audio.h +2 -17
- package/cpp/tools/mtmd/mtmd-helper.cpp +175 -12
- package/cpp/tools/mtmd/mtmd-helper.h +91 -0
- package/cpp/tools/mtmd/mtmd.cpp +368 -248
- package/cpp/tools/mtmd/mtmd.h +6 -70
- package/cpp/unicode.cpp +5 -0
- package/ios/CMakeLists.txt +26 -6
- package/ios/RNLlama.h +1 -1
- package/ios/RNLlama.mm +153 -3
- package/ios/RNLlamaContext.h +9 -1
- package/ios/RNLlamaContext.mm +112 -9
- package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/chat-parser.h +120 -0
- package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/chat.h +71 -6
- package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/common.h +9 -3
- package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/ggml-common.h +4 -0
- package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/ggml-cpu.h +1 -0
- package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/ggml-impl.h +63 -183
- package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/ggml.h +22 -3
- package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/json-partial.h +38 -0
- package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/json-schema-to-grammar.h +4 -4
- package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/llama-arch.h +27 -1
- package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/llama-batch.h +112 -54
- package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/llama-chat.h +1 -0
- package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/llama-context.h +38 -17
- package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/llama-cparams.h +2 -0
- package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/llama-graph.h +126 -58
- package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/llama-hparams.h +19 -2
- package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/llama-kv-cache-unified-iswa.h +128 -0
- package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/llama-kv-cache-unified.h +303 -0
- package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/llama-kv-cells.h +439 -0
- package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/llama-memory-hybrid.h +138 -0
- package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/llama-memory-recurrent.h +183 -0
- package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/llama-memory.h +86 -5
- package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/llama-model.h +26 -0
- package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/llama-vocab.h +1 -0
- package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/llama.h +150 -42
- package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/minja/chat-template.hpp +1 -1
- package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/minja/minja.hpp +1 -1
- package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/{json.hpp → nlohmann/json.hpp} +3027 -2267
- package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/nlohmann/json_fwd.hpp +187 -0
- package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/regex-partial.h +56 -0
- package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/rn-llama.h +32 -1
- package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/rn-tts.h +39 -0
- package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/ggml-llama.metallib +0 -0
- package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/rnllama +0 -0
- package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/chat-parser.h +120 -0
- package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/chat.h +71 -6
- package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/common.h +9 -3
- package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/ggml-common.h +4 -0
- package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/ggml-cpu.h +1 -0
- package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/ggml-impl.h +63 -183
- package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/ggml.h +22 -3
- package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/json-partial.h +38 -0
- package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/json-schema-to-grammar.h +4 -4
- package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/llama-arch.h +27 -1
- package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/llama-batch.h +112 -54
- package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/llama-chat.h +1 -0
- package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/llama-context.h +38 -17
- package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/llama-cparams.h +2 -0
- package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/llama-graph.h +126 -58
- package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/llama-hparams.h +19 -2
- package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/llama-kv-cache-unified-iswa.h +128 -0
- package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/llama-kv-cache-unified.h +303 -0
- package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/llama-kv-cells.h +439 -0
- package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/llama-memory-hybrid.h +138 -0
- package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/llama-memory-recurrent.h +183 -0
- package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/llama-memory.h +86 -5
- package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/llama-model.h +26 -0
- package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/llama-vocab.h +1 -0
- package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/llama.h +150 -42
- package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/minja/chat-template.hpp +1 -1
- package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/minja/minja.hpp +1 -1
- package/ios/rnllama.xcframework/{tvos-arm64/rnllama.framework/Headers → ios-arm64_x86_64-simulator/rnllama.framework/Headers/nlohmann}/json.hpp +3027 -2267
- package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/nlohmann/json_fwd.hpp +187 -0
- package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/regex-partial.h +56 -0
- package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/rn-llama.h +32 -1
- package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/rn-tts.h +39 -0
- package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/ggml-llama-sim.metallib +0 -0
- package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/rnllama +0 -0
- package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/chat-parser.h +120 -0
- package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/chat.h +71 -6
- package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/common.h +9 -3
- package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/ggml-common.h +4 -0
- package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/ggml-cpu.h +1 -0
- package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/ggml-impl.h +63 -183
- package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/ggml.h +22 -3
- package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/json-partial.h +38 -0
- package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/json-schema-to-grammar.h +4 -4
- package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/llama-arch.h +27 -1
- package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/llama-batch.h +112 -54
- package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/llama-chat.h +1 -0
- package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/llama-context.h +38 -17
- package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/llama-cparams.h +2 -0
- package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/llama-graph.h +126 -58
- package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/llama-hparams.h +19 -2
- package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/llama-kv-cache-unified-iswa.h +128 -0
- package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/llama-kv-cache-unified.h +303 -0
- package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/llama-kv-cells.h +439 -0
- package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/llama-memory-hybrid.h +138 -0
- package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/llama-memory-recurrent.h +183 -0
- package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/llama-memory.h +86 -5
- package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/llama-model.h +26 -0
- package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/llama-vocab.h +1 -0
- package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/llama.h +150 -42
- package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/minja/chat-template.hpp +1 -1
- package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/minja/minja.hpp +1 -1
- package/ios/rnllama.xcframework/{ios-arm64_x86_64-simulator/rnllama.framework/Headers → tvos-arm64/rnllama.framework/Headers/nlohmann}/json.hpp +3027 -2267
- package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/nlohmann/json_fwd.hpp +187 -0
- package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/regex-partial.h +56 -0
- package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/rn-llama.h +32 -1
- package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/rn-tts.h +39 -0
- package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/ggml-llama.metallib +0 -0
- package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/rnllama +0 -0
- package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/chat-parser.h +120 -0
- package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/chat.h +71 -6
- package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/common.h +9 -3
- package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/ggml-common.h +4 -0
- package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/ggml-cpu.h +1 -0
- package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/ggml-impl.h +63 -183
- package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/ggml.h +22 -3
- package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/json-partial.h +38 -0
- package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/json-schema-to-grammar.h +4 -4
- package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/llama-arch.h +27 -1
- package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/llama-batch.h +112 -54
- package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/llama-chat.h +1 -0
- package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/llama-context.h +38 -17
- package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/llama-cparams.h +2 -0
- package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/llama-graph.h +126 -58
- package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/llama-hparams.h +19 -2
- package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/llama-kv-cache-unified-iswa.h +128 -0
- package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/llama-kv-cache-unified.h +303 -0
- package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/llama-kv-cells.h +439 -0
- package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/llama-memory-hybrid.h +138 -0
- package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/llama-memory-recurrent.h +183 -0
- package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/llama-memory.h +86 -5
- package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/llama-model.h +26 -0
- package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/llama-vocab.h +1 -0
- package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/llama.h +150 -42
- package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/minja/chat-template.hpp +1 -1
- package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/minja/minja.hpp +1 -1
- package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/nlohmann/json.hpp +25526 -0
- package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/nlohmann/json_fwd.hpp +187 -0
- package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/regex-partial.h +56 -0
- package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/rn-llama.h +32 -1
- package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/rn-tts.h +39 -0
- package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/ggml-llama-sim.metallib +0 -0
- package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/rnllama +0 -0
- package/jest/mock.js +24 -0
- package/package.json +1 -1
- package/src/NativeRNLlama.ts +46 -2
- package/src/index.ts +105 -1
- package/cpp/ggml-cpu/ggml-cpu-aarch64.h +0 -8
- package/cpp/ggml-cpu/ggml-cpu-quants.c +0 -13326
- package/cpp/ggml-cpu/sgemm.cpp +0 -3544
- package/cpp/ggml-cpu/sgemm.h +0 -14
- package/cpp/llama-kv-cache.cpp +0 -2827
- package/cpp/llama-kv-cache.h +0 -515
- package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/llama-kv-cache.h +0 -515
- package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/llama-kv-cache.h +0 -515
- package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/llama-kv-cache.h +0 -515
- package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/json.hpp +0 -24766
- package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/llama-kv-cache.h +0 -515
- /package/cpp/ggml-cpu/{ggml-cpu-traits.h → traits.h} +0 -0
- /package/cpp/tools/mtmd/{miniaudio.h → miniaudio/miniaudio.h} +0 -0
- /package/cpp/tools/mtmd/{stb_image.h → stb/stb_image.h} +0 -0
package/cpp/llama-memory-recurrent.h (new file)

@@ -0,0 +1,183 @@
+#pragma once
+
+#include "llama-batch.h"
+#include "llama-graph.h"
+#include "llama-memory.h"
+
+#include <set>
+#include <vector>
+
+//
+// llama_memory_recurrent
+//
+
+// TODO: extract the cache state used for graph computation into llama_memory_recurrent_context_i
+// see the implementation of llama_kv_cache_unified_context_i for an example how to do it
+class llama_memory_recurrent : public llama_memory_i {
+public:
+
+    // this callback is used to filter out layers that should not be included in the cache
+    using layer_filter_cb = std::function<bool(int32_t il)>;
+
+    llama_memory_recurrent(
+            const llama_model & model,
+            layer_filter_cb  && filter,
+            lm_ggml_type        type_r,
+            lm_ggml_type        type_s,
+            bool                offload,
+            uint32_t            mem_size,
+            uint32_t            n_seq_max);
+
+    ~llama_memory_recurrent() = default;
+
+    //
+    // llama_memory_i
+    //
+
+    llama_memory_context_ptr init_batch(
+            llama_batch_allocr & balloc,
+            uint32_t n_ubatch,
+            bool embd_all) override;
+
+    llama_memory_context_ptr init_full() override;
+
+    llama_memory_context_ptr init_update(llama_context * lctx, bool optimize) override;
+
+    void clear(bool data) override;
+
+    bool seq_rm  (llama_seq_id seq_id, llama_pos p0, llama_pos p1) override;
+    void seq_cp  (llama_seq_id seq_id_src, llama_seq_id seq_id_dst, llama_pos p0, llama_pos p1) override;
+    void seq_keep(llama_seq_id seq_id) override;
+    void seq_add (llama_seq_id seq_id, llama_pos p0, llama_pos p1, llama_pos shift) override;
+    void seq_div (llama_seq_id seq_id, llama_pos p0, llama_pos p1, int d) override;
+
+    llama_pos seq_pos_min(llama_seq_id seq_id) const override;
+    llama_pos seq_pos_max(llama_seq_id seq_id) const override;
+
+    bool prepare(const std::vector<llama_ubatch> & ubatches);
+
+    // find a contiguous slot of memory cells and emplace the ubatch there
+    bool find_slot(const llama_ubatch & ubatch);
+
+    bool get_can_shift() const override;
+
+    // state write/load
+
+    void state_write(llama_io_write_i & io, llama_seq_id seq_id = -1) const override;
+    void state_read (llama_io_read_i  & io, llama_seq_id seq_id = -1) override;
+
+    uint32_t head = 0; // the location where the batch will be placed in the cache (see find_slot())
+    uint32_t size = 0; // total number of cells, shared across all sequences
+    uint32_t used = 0; // used cells (i.e. at least one seq_id)
+
+    // computed before each graph build
+    uint32_t n = 0;
+
+    // first zero-ed state
+    int32_t rs_z = -1;
+
+    // TODO: optimize for recurrent state needs
+    struct mem_cell {
+        llama_pos pos  = -1;
+        int32_t   src  = -1; // used to know where states should be copied from
+        int32_t   src0 = -1; // like src, but only used when setting the inputs (allowing to copy once)
+        int32_t   tail = -1;
+
+        std::set<llama_seq_id> seq_id;
+
+        bool has_seq_id(const llama_seq_id & id) const {
+            return seq_id.find(id) != seq_id.end();
+        }
+
+        bool is_empty() const {
+            return seq_id.empty();
+        }
+
+        bool is_same_seq(const mem_cell & other) const {
+            return seq_id == other.seq_id;
+        }
+    };
+
+    std::vector<mem_cell> cells;
+
+    // per layer
+    std::vector<lm_ggml_tensor *> r_l;
+    std::vector<lm_ggml_tensor *> s_l;
+
+private:
+    //const llama_model & model;
+    const llama_hparams & hparams;
+
+    const uint32_t n_seq_max = 1;
+
+    std::vector<lm_ggml_context_ptr>        ctxs;
+    std::vector<lm_ggml_backend_buffer_ptr> bufs;
+
+    size_t total_size() const;
+
+    size_t size_r_bytes() const;
+    size_t size_s_bytes() const;
+
+    void state_write_meta(llama_io_write_i & io, const std::vector<std::pair<uint32_t, uint32_t>> & cell_ranges, llama_seq_id seq_id = -1) const;
+    void state_write_data(llama_io_write_i & io, const std::vector<std::pair<uint32_t, uint32_t>> & cell_ranges) const;
+
+    bool state_read_meta(llama_io_read_i & io, uint32_t cell_count, llama_seq_id dest_seq_id = -1);
+    bool state_read_data(llama_io_read_i & io, uint32_t cell_count);
+};
+
+class llama_memory_recurrent_context : public llama_memory_context_i {
+public:
+    // used for errors
+    llama_memory_recurrent_context(llama_memory_status status);
+
+    // used to create a full-cache or update context
+    llama_memory_recurrent_context(
+            llama_memory_recurrent * mem);
+
+    // used to create a batch processing context from a batch
+    llama_memory_recurrent_context(
+            llama_memory_recurrent * mem,
+            std::vector<llama_ubatch> ubatches);
+
+    virtual ~llama_memory_recurrent_context();
+
+    //
+    // llama_memory_context_i
+    //
+
+    bool next()  override;
+    bool apply() override;
+
+    llama_memory_status  get_status() const override;
+    const llama_ubatch & get_ubatch() const override;
+
+    //
+    // llama_memory_recurrent_context specific API
+    //
+
+    uint32_t get_n_rs() const;
+    uint32_t get_head() const;
+    int32_t  get_rs_z() const;
+    uint32_t get_size() const;
+
+    lm_ggml_tensor * get_r_l(int32_t il) const;
+    lm_ggml_tensor * get_s_l(int32_t il) const;
+
+    int32_t s_copy(int i) const;
+
+private:
+    const llama_memory_status status;
+
+    llama_memory_recurrent * mem;
+
+    size_t i_next = 0;
+
+    std::vector<llama_ubatch> ubatches;
+
+    //
+    // data needed for building the compute graph for the current ubatch:
+    // TODO: extract all the state like `head` and `n` here
+    //
+
+    const bool is_full = false;
+};
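The mem_cell bookkeeping above is the core of the recurrent cache: a cell is free exactly when no sequence references it, and find_slot() looks for a contiguous run of free cells to hold a ubatch. A toy sketch of that idea, using simplified stand-in types (not the library's actual classes, and not its real find_slot() algorithm, which also tracks tails and copy sources):

```cpp
// Toy illustration of the "a cell is free when seq_id is empty" invariant
// and a naive contiguous free-slot scan.
#include <cstdint>
#include <cstdio>
#include <set>
#include <vector>

struct toy_cell {
    std::set<int32_t> seq_id;                      // sequences referencing this cell
    bool is_empty() const { return seq_id.empty(); }
};

// first index of `n` contiguous empty cells, or -1 if no such run exists
static int find_contiguous_slot(const std::vector<toy_cell> & cells, size_t n) {
    size_t run = 0;
    for (size_t i = 0; i < cells.size(); ++i) {
        run = cells[i].is_empty() ? run + 1 : 0;
        if (run == n) {
            return (int) (i - n + 1);
        }
    }
    return -1;
}

int main() {
    std::vector<toy_cell> cells(8);
    cells[2].seq_id.insert(0);                     // cell 2 is held by sequence 0
    // the first run of 3 free cells starts right after the occupied cell
    std::printf("slot for 3 cells starts at %d\n", find_contiguous_slot(cells, 3)); // prints 3
    return 0;
}
```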
package/cpp/llama-memory.h

@@ -2,6 +2,15 @@
 
 #include "llama.h"
 
+#include <memory>
+
+struct llama_ubatch;
+
+class llama_batch_allocr;
+
+class llama_io_write_i;
+class llama_io_read_i;
+
 struct llama_memory_params {
     // kv cache
     lm_ggml_type type_k;
@@ -11,22 +20,94 @@ struct llama_memory_params {
     bool swa_full;
 };
 
+enum llama_memory_status {
+    LLAMA_MEMORY_STATUS_SUCCESS = 0,
+    LLAMA_MEMORY_STATUS_NO_UPDATE,
+    LLAMA_MEMORY_STATUS_FAILED_PREPARE,
+    LLAMA_MEMORY_STATUS_FAILED_COMPUTE,
+};
+
+// helper function for combining the status of two memory contexts
+// useful for implementing hybrid memory types (e.g. iSWA)
+llama_memory_status llama_memory_status_combine(llama_memory_status s0, llama_memory_status s1);
+
+// the interface for managing the memory context during batch processing
+// this interface is implemented per memory type. see:
+//   - llama_kv_cache_unified_context
+//   - llama_kv_cache_unified_iswa_context
+//   ...
+//
+// the only method that should mutate the memory and the memory context is llama_memory_i::apply()
+struct llama_memory_context_i {
+    virtual ~llama_memory_context_i() = default;
+
+    // consume the current ubatch from the context and proceed to the next one
+    // return false if we are done
+    virtual bool next() = 0;
+
+    // apply the memory state for the current ubatch to the memory object
+    // return false on failure
+    virtual bool apply() = 0;
+
+    // get the current ubatch
+    virtual const llama_ubatch & get_ubatch() const = 0;
+
+    // get the status of the memory context - used for error handling and checking if any updates would be applied
+    virtual llama_memory_status get_status() const = 0;
+};
+
+using llama_memory_context_ptr = std::unique_ptr<llama_memory_context_i>;
+
 // general concept of LLM memory
 // the KV cache is a type of LLM memory, but there can be other types
-class llama_memory_i {
-public:
+struct llama_memory_i {
     virtual ~llama_memory_i() = default;
 
-    virtual void clear() = 0;
+    // split the input batch into a set of ubatches and verify that they can fit into the cache
+    // return a context object containing the ubatches and memory state required to process them
+    // check the llama_memory_context_i::get_status() for the result
+    virtual llama_memory_context_ptr init_batch(
+            llama_batch_allocr & balloc,
+            uint32_t n_ubatch,
+            bool embd_all) = 0;
+
+    // simulate full cache, used for allocating worst-case compute buffers
+    virtual llama_memory_context_ptr init_full() = 0;
+
+    // prepare for any pending memory updates, such as shifts, defrags, etc.
+    // status == LLAMA_MEMORY_STATUS_NO_UPDATE if there is nothing to update
+    virtual llama_memory_context_ptr init_update(llama_context * lctx, bool optimize) = 0;
+
+    // getters
+    virtual bool get_can_shift() const = 0;
+
+    //
+    // ops
+    //
+
+    // if data == true, the data buffers will also be cleared together with the metadata
+    virtual void clear(bool data) = 0;
 
     virtual bool seq_rm  (llama_seq_id seq_id, llama_pos p0, llama_pos p1) = 0;
     virtual void seq_cp  (llama_seq_id seq_id_src, llama_seq_id seq_id_dst, llama_pos p0, llama_pos p1) = 0;
     virtual void seq_keep(llama_seq_id seq_id) = 0;
-    virtual void seq_add (llama_seq_id seq_id, llama_pos p0, llama_pos p1, llama_pos delta) = 0;
+    virtual void seq_add (llama_seq_id seq_id, llama_pos p0, llama_pos p1, llama_pos shift) = 0;
    virtual void seq_div (llama_seq_id seq_id, llama_pos p0, llama_pos p1, int d) = 0;
 
     virtual llama_pos seq_pos_min(llama_seq_id seq_id) const = 0;
     virtual llama_pos seq_pos_max(llama_seq_id seq_id) const = 0;
 
-    virtual bool get_can_shift() const = 0;
+    //
+    // state write/read
+    //
+
+    virtual void state_write(llama_io_write_i & io, llama_seq_id seq_id = -1) const = 0;
+    virtual void state_read (llama_io_read_i  & io, llama_seq_id seq_id = -1) = 0;
+};
+
+using llama_memory_ptr = std::unique_ptr<llama_memory_i>;
+
+// TODO: temporary until the llama_kv_cache is removed from the public API
+struct llama_kv_cache : public llama_memory_i {
+    virtual ~llama_kv_cache() = default;
 };
package/cpp/llama-model.h

@@ -73,6 +73,7 @@ enum llm_type {
     LLM_TYPE_40B,
     LLM_TYPE_65B,
     LLM_TYPE_70B,
+    LLM_TYPE_142B,
     LLM_TYPE_236B,
     LLM_TYPE_290B,
     LLM_TYPE_314B,
@@ -94,6 +95,8 @@ enum llm_type {
     LLM_TYPE_17B_128E, // llama4 Maverick
     LLM_TYPE_30B_A3B,
     LLM_TYPE_235B_A22B,
+    LLM_TYPE_E2B,
+    LLM_TYPE_E4B,
 };
 
 std::string llama_rope_scaling_type_name(llama_rope_scaling_type rope_scaling_type);
@@ -315,6 +318,19 @@ struct llama_layer {
     struct lm_ggml_tensor * ffn_up_scale   = nullptr;
     struct lm_ggml_tensor * ffn_down_scale = nullptr;
 
+    // altup & laurel
+    struct lm_ggml_tensor * per_layer_inp_gate   = nullptr;
+    struct lm_ggml_tensor * per_layer_proj       = nullptr;
+    struct lm_ggml_tensor * per_layer_post_norm  = nullptr;
+    struct lm_ggml_tensor * altup_correct_coef   = nullptr;
+    struct lm_ggml_tensor * altup_correct_scale  = nullptr;
+    struct lm_ggml_tensor * altup_predict_coef   = nullptr;
+    struct lm_ggml_tensor * altup_router         = nullptr;
+    struct lm_ggml_tensor * altup_router_norm    = nullptr;
+    struct lm_ggml_tensor * laurel_l             = nullptr;
+    struct lm_ggml_tensor * laurel_r             = nullptr;
+    struct lm_ggml_tensor * laurel_post_norm     = nullptr;
+
     struct llama_layer_posnet posnet;
 
     struct llama_layer_convnext convnext;
@@ -329,6 +345,9 @@ struct llama_model {
     llama_hparams hparams = {};
     llama_vocab   vocab;
 
+    // for classifier models
+    std::vector<std::string> classifier_labels;
+
     struct lm_ggml_tensor * tok_embd   = nullptr;
     struct lm_ggml_tensor * type_embd  = nullptr;
     struct lm_ggml_tensor * pos_embd   = nullptr;
@@ -350,6 +369,13 @@ struct llama_model {
     struct lm_ggml_tensor * conv1d   = nullptr;
     struct lm_ggml_tensor * conv1d_b = nullptr;
 
+    // gemma3n altup
+    struct lm_ggml_tensor * tok_embd_per_layer   = nullptr;
+    struct lm_ggml_tensor * altup_proj           = nullptr;
+    struct lm_ggml_tensor * altup_unembd_proj    = nullptr;
+    struct lm_ggml_tensor * per_layer_model_proj = nullptr;
+    struct lm_ggml_tensor * per_layer_proj_norm  = nullptr;
+
     std::vector<llama_layer> layers;
 
     llama_model_params params;
package/cpp/llama-vocab.h

@@ -74,6 +74,7 @@ struct llama_vocab {
     bool get_add_space_prefix          () const;
     bool get_add_bos                   () const;
     bool get_add_eos                   () const;
+    bool get_add_sep                   () const;
     bool get_ignore_merges             () const;
     bool get_clean_spaces              () const;
     bool get_remove_extra_whitespaces  () const;